Commit 09262653 authored by Yang Yingliang, committed by Xie XiuQi

driver: hns3: update hns3 driver from drivers

Sync the hns3 driver from the drivers team.
Based on 5db38ff635972569325bbf95db88fa3558f27ea7
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 0961e03b
......@@ -9,6 +9,6 @@ obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o
hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o kcompat.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
......@@ -21,6 +21,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */
HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */
HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
......@@ -36,6 +37,10 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
};
/* below are per-VF mac-vlan subcodes */
......@@ -46,9 +51,6 @@ enum hclge_mbx_mac_vlan_subcode {
HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */
HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */
};
/* below are per-VF vlan cfg subcodes */
......@@ -88,6 +90,12 @@ struct hclge_mbx_pf_to_vf_cmd {
u16 msg[8];
};
struct hclge_vf_rst_cmd {
u8 dest_vfid;
u8 vf_rst;
u8 rsv[22];
};
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
......
......@@ -29,8 +29,8 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
return false;
}
static void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, int inited)
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, int inited)
{
switch (client->type) {
case HNAE3_CLIENT_KNIC:
......@@ -46,6 +46,7 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
break;
}
}
EXPORT_SYMBOL(hnae3_set_client_init_flag);
static int hnae3_get_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
......@@ -86,14 +87,11 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* now, (un-)instantiate client by calling lower layer */
if (is_reg) {
ret = ae_dev->ops->init_client_instance(client, ae_dev);
if (ret) {
if (ret)
dev_err(&ae_dev->pdev->dev,
"fail to instantiate client, ret = %d\n", ret);
return ret;
}
hnae3_set_client_init_flag(client, ae_dev, 1);
return 0;
return ret;
}
if (hnae3_get_client_init_flag(client, ae_dev)) {
......@@ -240,7 +238,7 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo);
* @ae_dev: the AE device
* NOTE: the duplicated name will not be checked
*/
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
......@@ -261,6 +259,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!ae_dev->ops) {
dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
ret = -EOPNOTSUPP;
goto out_err;
}
......@@ -287,8 +286,15 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
ret);
}
mutex_unlock(&hnae3_common_lock);
return 0;
out_err:
list_del(&ae_dev->node);
mutex_unlock(&hnae3_common_lock);
return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_dev);
......
......@@ -45,12 +45,16 @@
#define HNAE3_CLASS_NAME_SIZE 16
#define HNAE3_REVISION_ID_20 0x20
#define HNAE3_REVISION_ID_21 0x21
#define HNAE3_DEV_INITED_B 0x0
#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
#define HNAE3_DEV_SUPPORT_DCB_B 0x2
#define HNAE3_KNIC_CLIENT_INITED_B 0x3
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
......@@ -61,6 +65,9 @@
#define hnae3_dev_dcb_supported(hdev) \
hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
......@@ -79,15 +86,17 @@ struct hnae3_queue {
struct hnae3_handle *handle;
int tqp_index; /* index in a handle */
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */
u16 tx_desc_num;/* total number of tx desc */
u16 rx_desc_num;/* total number of rx desc */
};
/*hnae3 loop mode*/
enum hnae3_loop {
HNAE3_MAC_INTER_LOOP_MAC,
HNAE3_MAC_INTER_LOOP_SERDES,
HNAE3_MAC_INTER_LOOP_PHY,
HNAE3_MAC_LOOP_NONE,
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
HNAE3_LOOP_PARALLEL_SERDES,
HNAE3_LOOP_PHY,
HNAE3_LOOP_NONE,
};
enum hnae3_client_type {
......@@ -107,6 +116,7 @@ enum hnae3_media_type {
HNAE3_MEDIA_TYPE_FIBER,
HNAE3_MEDIA_TYPE_COPPER,
HNAE3_MEDIA_TYPE_BACKPLANE,
HNAE3_MEDIA_TYPE_NONE,
};
enum hnae3_reset_notify_type {
......@@ -114,18 +124,28 @@ enum hnae3_reset_notify_type {
HNAE3_DOWN_CLIENT,
HNAE3_INIT_CLIENT,
HNAE3_UNINIT_CLIENT,
HNAE3_RESTORE_CLIENT,
};
enum hnae3_reset_type {
HNAE3_VF_RESET,
HNAE3_VF_FUNC_RESET,
HNAE3_VF_PF_FUNC_RESET,
HNAE3_VF_FULL_RESET,
HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
HNAE3_UNKNOWN_RESET,
HNAE3_NONE_RESET,
};
enum hnae3_flr_state {
HNAE3_FLR_DOWN,
HNAE3_FLR_DONE,
};
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
......@@ -173,6 +193,7 @@ struct hnae3_ae_dev {
struct list_head node;
u32 flag;
enum hnae3_dev_type dev_type;
enum hnae3_reset_type reset_type;
void *priv;
};
......@@ -190,6 +211,10 @@ struct hnae3_ae_dev {
* Enable the hardware
* stop()
* Disable the hardware
* start_client()
* Inform the hclge that client has been started
* stop_client()
* Inform the hclge that client has been stopped
* get_status()
* Get the carrier state of the back channel of the handle, 1 for ok, 0 for
* non-ok
......@@ -285,17 +310,22 @@ struct hnae3_ae_dev {
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en
* Enable/disable HW GRO
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
void (*flr_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
int (*start)(struct hnae3_handle *handle);
void (*stop)(struct hnae3_handle *handle);
int (*client_start)(struct hnae3_handle *handle);
void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex);
......@@ -309,8 +339,8 @@ struct hnae3_ae_ops {
int (*set_loopback)(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en);
void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc);
int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc);
int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
void (*get_pauseparam)(struct hnae3_handle *handle,
......@@ -337,6 +367,8 @@ struct hnae3_ae_ops {
void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
bool is_first);
int (*do_ioctl)(struct hnae3_handle *handle,
struct ifreq *ifr, int cmd);
int (*add_uc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
int (*rm_uc_addr)(struct hnae3_handle *handle,
......@@ -346,8 +378,6 @@ struct hnae3_ae_ops {
const unsigned char *addr);
int (*rm_mc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
int (*update_mta_status)(struct hnae3_handle *handle);
void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
......@@ -384,7 +414,7 @@ struct hnae3_ae_ops {
int vector_num,
struct hnae3_ring_chain_node *vr_chain);
void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
u32 (*get_fw_version)(struct hnae3_handle *handle);
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
......@@ -395,12 +425,15 @@ struct hnae3_ae_ops {
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct hnae3_handle *handle);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
u16 *free_tqps, u16 *max_rss_size);
int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
u16 *alloc_tqps, u16 *max_rss_size);
int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured);
void (*get_flowctrl_adv)(struct hnae3_handle *handle,
u32 *flowctrl_adv);
int (*set_led_id)(struct hnae3_handle *handle,
......@@ -408,7 +441,37 @@ struct hnae3_ae_ops {
void (*get_link_mode)(struct hnae3_handle *handle,
unsigned long *supported,
unsigned long *advertising);
void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type);
int (*add_fd_entry)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd);
int (*del_fd_entry)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd);
void (*del_all_fd_entries)(struct hnae3_handle *handle,
bool clear_list);
int (*get_fd_rule_cnt)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd);
int (*get_fd_rule_info)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd);
int (*get_fd_all_rules)(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
int (*set_gro_en)(struct hnae3_handle *handle, int enable);
void (*enable_timer_task)(struct hnae3_handle *handle, bool enable);
int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
int (*mac_connect_phy)(struct hnae3_handle *handle);
void (*mac_disconnect_phy)(struct hnae3_handle *handle);
bool (*reset_fail)(struct hnae3_handle *handle);
#ifdef CONFIG_HNS3_TEST
int (*send_cmdq)(struct hnae3_handle *handle, void *data, int num);
int (*test_cmdq)(struct hnae3_handle *handle, void *data, int *len);
int (*ecc_handle)(struct hnae3_ae_dev *ae_dev);
#endif
};
struct hnae3_dcb_ops {
......@@ -422,7 +485,6 @@ struct hnae3_dcb_ops {
u8 (*getdcbx)(struct hnae3_handle *);
u8 (*setdcbx)(struct hnae3_handle *, u8);
int (*map_update)(struct hnae3_handle *);
int (*setup_tc)(struct hnae3_handle *, u8, u8 *);
};
......@@ -447,8 +509,10 @@ struct hnae3_tc_info {
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
u16 req_rss_size;
u16 rx_buf_len;
u16 num_desc;
u16 num_tx_desc;
u16 num_rx_desc;
u8 num_tc; /* Total number of enabled TCs */
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
......@@ -459,6 +523,7 @@ struct hnae3_knic_private_info {
const struct hnae3_dcb_ops *dcb_ops;
u16 int_rl_setting;
enum pkt_hash_types rss_type;
};
struct hnae3_roce_private_info {
......@@ -466,30 +531,47 @@ struct hnae3_roce_private_info {
void __iomem *roce_io_base;
int base_vector;
int num_vectors;
/* The below attributes defined for RoCE client. hnae3 gives
* initial values to them, and RoCE client can modify and use
* them.
*/
unsigned long reset_state;
unsigned long instance_state;
unsigned long state;
};
struct hnae3_unic_private_info {
struct net_device *netdev;
u16 rx_buf_len;
u16 num_desc;
u16 num_tx_desc;
u16 num_rx_desc;
u16 num_tqps; /* total number of tqps in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
};
#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0)
#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0)
#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */
#define HNAE3_BPE BIT(2) /* broadcast promisc enable */
#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */
#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */
#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */
#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
struct hnae3_handle {
struct hnae3_client *client;
struct pci_dev *pdev;
void *priv;
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
u64 flags; /* Indicate the capabilities for this handle*/
unsigned long last_reset_time;
enum hnae3_reset_type reset_level;
u64 flags; /* Indicate the status for this handle*/
union {
struct net_device *netdev; /* first member */
......@@ -499,6 +581,10 @@ struct hnae3_handle {
};
u32 numa_node_mask; /* for multi-chip support */
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
};
#define hnae3_set_field(origin, mask, shift, val) \
......@@ -513,7 +599,7 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field((origin), (0x1 << (shift)), (shift))
void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
......@@ -521,4 +607,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client);
void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, int inited);
#endif
......@@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
if (hns3_nic_resetting(ndev))
return -EBUSY;
if (h->kinfo.dcb_ops->ieee_getets)
return h->kinfo.dcb_ops->ieee_getets(h, ets);
......@@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
if (hns3_nic_resetting(ndev))
return -EBUSY;
if (h->kinfo.dcb_ops->ieee_setets)
return h->kinfo.dcb_ops->ieee_setets(h, ets);
......@@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
if (hns3_nic_resetting(ndev))
return -EBUSY;
if (h->kinfo.dcb_ops->ieee_getpfc)
return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
......@@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
if (hns3_nic_resetting(ndev))
return -EBUSY;
if (h->kinfo.dcb_ops->ieee_setpfc)
return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
......
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */
#include <linux/debugfs.h>
#include <linux/device.h>
#include "hnae3.h"
#include "hns3_enet.h"
#define HNS3_DBG_READ_LEN 256
static struct dentry *hns3_dbgfs_root;
static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
struct hns3_enet_ring *ring;
u32 base_add_l, base_add_h;
u32 queue_num, queue_max;
u32 value, i = 0;
int cnt;
if (!priv->ring_data) {
dev_err(&h->pdev->dev, "ring_data is NULL\n");
return -EFAULT;
}
queue_max = h->kinfo.num_tqps;
cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
if (cnt)
queue_num = 0;
else
queue_max = queue_num + 1;
dev_info(&h->pdev->dev, "queue info\n");
if (queue_num >= h->kinfo.num_tqps) {
dev_err(&h->pdev->dev,
"Queue number(%u) is out of range(%u)\n", queue_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
ring_data = priv->ring_data;
for (i = queue_num; i < queue_max; i++) {
/* Each cycle needs to determine whether the instance is reset,
* to prevent reference to invalid memory. And need to ensure
* that the following code is executed within 100ms.
*/
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_L_REG);
dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG);
dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG);
dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG);
dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG);
dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG);
dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
ring = ring_data[i].ring;
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_L_REG);
dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG);
dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG);
dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG);
dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG);
dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
value);
}
return 0;
}
static int hns3_dbg_queue_map(struct hnae3_handle *h)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
int i;
if (!h->ae_algo->ops->get_global_queue_id)
return -EOPNOTSUPP;
dev_info(&h->pdev->dev, "map info for queue id and vector id\n");
dev_info(&h->pdev->dev,
"local queue id | global queue id | vector id\n");
for (i = 0; i < h->kinfo.num_tqps; i++) {
u16 global_qid;
global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
ring_data = &priv->ring_data[i];
if (!ring_data || !ring_data->ring ||
!ring_data->ring->tqp_vector)
continue;
dev_info(&h->pdev->dev,
" %4d %4d %4d\n",
i, global_qid,
ring_data->ring->tqp_vector->vector_irq);
}
return 0;
}
static int hns3_dbg_bd_info(struct hnae3_handle *h, char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
struct hns3_desc *rx_desc, *tx_desc;
struct device *dev = &h->pdev->dev;
struct hns3_enet_ring *ring;
u32 tx_index, rx_index;
u32 q_num, value;
int cnt;
cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index);
if (cnt == 2) {
rx_index = tx_index;
} else if (cnt != 1) {
dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt);
return -EINVAL;
}
if (q_num >= h->kinfo.num_tqps) {
dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
ring_data = priv->ring_data;
ring = ring_data[q_num].ring;
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
tx_index = (cnt == 1) ? value : tx_index;
if (tx_index >= ring->desc_num) {
dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index,
ring->desc_num - 1);
return -EINVAL;
}
tx_desc = &ring->desc[tx_index];
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr);
dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag);
dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv);
dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen);
dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
ring = ring_data[(u32)(q_num + h->kinfo.num_tqps)].ring;
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
rx_index = (cnt == 1) ? value : tx_index;
rx_desc = &ring->desc[rx_index];
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr);
dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id);
dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag);
dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb);
dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag);
dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info);
return 0;
}
static void hns3_dbg_help(struct hnae3_handle *h)
{
#define HNS3_DBG_BUF_LEN 256
char printf_buf[HNS3_DBG_BUF_LEN];
dev_info(&h->pdev->dev, "available commands\n");
dev_info(&h->pdev->dev, "queue info [number]\n");
dev_info(&h->pdev->dev, "queue map\n");
dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
dev_info(&h->pdev->dev, "dump fd tcam\n");
dev_info(&h->pdev->dev, "dump promisc [vf id]\n");
dev_info(&h->pdev->dev, "dump tc\n");
dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
dev_info(&h->pdev->dev, "dump tm\n");
dev_info(&h->pdev->dev, "dump checksum\n");
dev_info(&h->pdev->dev, "dump qos pause cfg\n");
dev_info(&h->pdev->dev, "dump qos pri map\n");
dev_info(&h->pdev->dev, "dump qos buf cfg\n");
dev_info(&h->pdev->dev, "dump mac tbl\n");
dev_info(&h->pdev->dev, "dump port vlan tbl\n");
dev_info(&h->pdev->dev, "dump vf vlan tbl [vf id]\n");
dev_info(&h->pdev->dev, "dump mng tbl\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
HNS3_DBG_BUF_LEN - 1);
strncat(printf_buf + strlen(printf_buf),
" [igu egu <prt_id>] [rpu <tc_queue_num>]",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
strncat(printf_buf + strlen(printf_buf),
" [rtc] [ppp] [rcb] [tqp <q_num>]]\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]",
HNS3_DBG_BUF_LEN - 1);
strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
int uncopy_bytes;
char *buf;
int len;
if (*ppos != 0)
return 0;
if (count < HNS3_DBG_READ_LEN)
return -ENOSPC;
buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
"Please echo help to cmd to get help information");
uncopy_bytes = copy_to_user(buffer, buf, len);
kfree(buf);
if (uncopy_bytes)
return -EFAULT;
return (*ppos = len);
}
static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct hnae3_handle *handle = filp->private_data;
struct hns3_nic_priv *priv = handle->priv;
char *cmd_buf, *cmd_buf_tmp;
int uncopied_bytes;
int ret = 0;
if (*ppos != 0)
return 0;
/* Judge if the instance is being reset. */
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
cmd_buf = kzalloc(count + 1, GFP_KERNEL);
if (!cmd_buf)
return count;
uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
if (uncopied_bytes) {
kfree(cmd_buf);
return -EFAULT;
}
cmd_buf[count] = '\0';
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
count = cmd_buf_tmp - cmd_buf + 1;
}
if (strncmp(cmd_buf, "help", 4) == 0)
hns3_dbg_help(handle);
else if (strncmp(cmd_buf, "queue info", 10) == 0)
ret = hns3_dbg_queue_info(handle, cmd_buf);
else if (strncmp(cmd_buf, "queue map", 9) == 0)
ret = hns3_dbg_queue_map(handle);
else if (strncmp(cmd_buf, "bd info", 7) == 0)
ret = hns3_dbg_bd_info(handle, cmd_buf);
else if (handle->ae_algo->ops->dbg_run_cmd)
ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
if (ret)
hns3_dbg_help(handle);
kfree(cmd_buf);
cmd_buf = NULL;
return count;
}
static const struct file_operations hns3_dbg_cmd_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = hns3_dbg_cmd_read,
.write = hns3_dbg_cmd_write,
};
void hns3_dbg_init(struct hnae3_handle *handle)
{
const char *name = pci_name(handle->pdev);
struct dentry *pfile;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
if (!handle->hnae3_dbgfs)
return;
pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
&hns3_dbg_cmd_fops);
if (!pfile) {
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
dev_warn(&handle->pdev->dev, "create file for %s fail\n",
name);
}
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
{
hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
if (!hns3_dbgfs_root) {
pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
return;
}
}
void hns3_dbg_unregister_debugfs(void)
{
debugfs_remove_recursive(hns3_dbgfs_root);
hns3_dbgfs_root = NULL;
}
......@@ -10,12 +10,12 @@
#define HNS3_MOD_VERSION "1.0"
extern const char hns3_driver_version[];
extern char hns3_driver_version[];
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
HNS3_NIC_STATE_REINITING,
HNS3_NIC_STATE_INITED,
HNS3_NIC_STATE_DOWN,
HNS3_NIC_STATE_DISABLED,
HNS3_NIC_STATE_REMOVING,
......@@ -47,7 +47,7 @@ enum hns3_nic_state {
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
#define HNS3_RING_RX_VM_REG 0x00090
#define HNS3_RING_EN_REG 0x00090
#define HNS3_RING_T0_BE_RST 0x00094
#define HNS3_RING_COULD_BE_RST 0x00098
#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
......@@ -74,9 +74,12 @@ enum hns3_nic_state {
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32768
#define HNS3_RING_MIN_PENDING 8
#define HNS3_RING_MIN_PENDING 24
#define HNS3_RING_BD_MULTIPLE 8
#define HNS3_MAX_MTU 9728
/* max frame size of mac */
#define HNS3_MAC_MAX_FRAME 9728
#define HNS3_MAX_MTU \
(HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
......@@ -109,6 +112,10 @@ enum hns3_nic_state {
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
......@@ -135,9 +142,8 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_HDL_S 16
#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
#define HNS3_RXD_HSIND_B 31
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
......@@ -194,6 +200,15 @@ enum hns3_nic_state {
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
#define HNS3_RING_EN_B 0
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
HNS3_L2_TYPE_BROADCAST,
HNS3_L2_TYPE_INVALID,
};
enum hns3_pkt_l3t_type {
HNS3_L3T_NONE,
HNS3_L3T_IPV6,
......@@ -284,11 +299,11 @@ struct hns3_desc_cb {
/* priv data for the desc, e.g. skb when use with ip stack*/
void *priv;
u32 page_offset;
u32 length; /* length of the buffer */
u16 page_offset;
u16 reuse_flag;
u32 length; /* length of the buffer */
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
};
......@@ -368,6 +383,7 @@ struct ring_stats {
u64 err_bd_num;
u64 l2_err;
u64 l3l4_csum_err;
u64 rx_multicast;
};
};
};
......@@ -399,11 +415,18 @@ struct hns3_enet_ring {
*/
int next_to_clean;
int pull_len;
u32 frag_num;
unsigned char *va;
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
int pending_buf;
struct sk_buff *skb;
struct sk_buff *tail_skb;
};
struct hns_queue;
......@@ -419,8 +442,7 @@ struct hns3_nic_ring_data {
struct hns3_nic_ops {
int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
enum hns_desc_type type);
int size, int frag_end, enum hns_desc_type type);
int (*maybe_stop_tx)(struct sk_buff **out_skb,
int *bnum, struct hns3_enet_ring *ring);
void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
......@@ -461,8 +483,6 @@ enum hns3_link_mode_bits {
#define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40
#define HNS3_INT_ADAPT_DOWN_START 100
struct hns3_enet_coalesce {
u16 int_gl;
u8 gl_adapt_enable;
......@@ -491,12 +511,12 @@ struct hns3_enet_tqp_vector {
struct hns3_enet_ring_group rx_group;
struct hns3_enet_ring_group tx_group;
cpumask_t affinity_mask;
u16 num_tqps; /* total number of tqps in TQP vector */
struct irq_affinity_notify affinity_notify;
char name[HNAE3_INT_NAME_LEN];
/* when 0 should adjust interrupt coalesce parameter */
u8 int_adapt_down;
unsigned long last_jiffies;
} ____cacheline_internodealigned_in_smp;
......@@ -541,6 +561,8 @@ struct hns3_nic_priv {
/* Vxlan/Geneve information */
struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
};
union l3_hdr_info {
......@@ -552,6 +574,7 @@ union l3_hdr_info {
union l4_hdr_info {
struct tcphdr *tcp;
struct udphdr *udp;
struct gre_base_hdr *gre;
unsigned char *hdr;
};
......@@ -574,6 +597,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring)
return ring->next_to_use == ring->next_to_clean;
}
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
return readl(base + reg);
}
static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
u8 __iomem *reg_addr = READ_ONCE(base);
......@@ -581,6 +609,25 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
writel(value, reg_addr + reg);
}
static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
{
return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
ae_dev->reset_type == HNAE3_FLR_RESET ||
ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
}
#define hns3_read_dev(a, reg) \
hns3_read_reg((a)->io_base, (reg))
static inline bool hns3_nic_resetting(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
......@@ -615,7 +662,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch);
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
......@@ -631,10 +678,17 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif
bool hns3_get_tx_timeo_queue_info(struct net_device *ndev);
void hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
#endif
......@@ -4,7 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/phy.h>
#include "kcompat.h"
#include "hns3_enet.h"
struct hns3_stats {
......@@ -22,13 +22,13 @@ struct hns3_stats {
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
HNS3_TQP_STAT("errors", tx_err_cnt),
HNS3_TQP_STAT("tx_wake", restart_queue),
HNS3_TQP_STAT("tx_busy", tx_busy),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
......@@ -36,7 +36,7 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", rx_pkts),
HNS3_TQP_STAT("bytes", rx_bytes),
......@@ -47,13 +47,14 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("err_bd_num", err_bd_num),
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
HNS3_TQP_STAT("multicast", rx_multicast),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
#define HNS3_SELF_TEST_TYPE_NUM 2
#define HNS3_SELF_TEST_TPYE_NUM 3
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
......@@ -71,6 +72,7 @@ struct hns3_link_mode_mapping {
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
bool vlan_filter_enable;
int ret;
if (!h->ae_algo->ops->set_loopback ||
......@@ -78,8 +80,9 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
return -EOPNOTSUPP;
switch (loop) {
case HNAE3_MAC_INTER_LOOP_SERDES:
case HNAE3_MAC_INTER_LOOP_MAC:
case HNAE3_LOOP_SERIAL_SERDES:
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
......@@ -90,7 +93,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
if (ret)
return ret;
h->ae_algo->ops->set_promisc_mode(h, en, en);
if (en) {
h->ae_algo->ops->set_promisc_mode(h, true, true);
} else {
/* recover promisc mode before loopback test */
hns3_update_promisc_mode(ndev, h->netdev_flags);
vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
hns3_enable_vlan_filter(ndev, vlan_filter_enable);
}
return ret;
}
......@@ -100,20 +110,10 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
if (!h->ae_algo->ops->start)
return -EOPNOTSUPP;
ret = hns3_nic_reset_all_ring(h);
if (ret)
return ret;
ret = h->ae_algo->ops->start(h);
if (ret) {
netdev_err(ndev,
"hns3_lb_up ae start return error: %d\n", ret);
return ret;
}
ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000);
......@@ -122,19 +122,14 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
if (!h->ae_algo->ops->stop)
return -EOPNOTSUPP;
ret = hns3_lp_setup(ndev, loop_mode, false);
if (ret) {
netdev_err(ndev, "lb_setup return error: %d\n", ret);
return ret;
}
h->ae_algo->ops->stop(h);
usleep_range(10000, 20000);
return 0;
......@@ -148,10 +143,16 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
unsigned int i;
skb_reserve(skb, NET_IP_ALIGN);
#ifdef SKB_PUT_RETURN_VOID_POINT
ethh = skb_put(skb, sizeof(struct ethhdr));
#else
ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
#endif
packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
ethh->h_dest[5] += 0x1f;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
skb_reset_mac_header(skb);
......@@ -214,7 +215,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
for (i = start_ringid; i <= end_ringid; i++) {
struct hns3_enet_ring *ring = priv->ring_data[i].ring;
hns3_clean_tx_ring(ring, budget);
hns3_clean_tx_ring(ring);
}
}
......@@ -288,50 +289,49 @@ static void hns3_self_test(struct net_device *ndev,
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
int st_param[HNS3_SELF_TEST_TPYE_NUM][2];
bool if_running = netif_running(ndev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
bool dis_vlan_filter;
#endif
int test_index = 0;
u32 i;
if (hns3_nic_resetting(ndev)) {
netdev_err(ndev, "dev resetting!");
return;
}
/* Only do offline selftest, or pass by default */
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC;
st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES;
st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] =
h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
HNAE3_LOOP_PARALLEL_SERDES;
st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Disable the vlan filter for selftest does not support it */
dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter;
if (dis_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, false);
#endif
set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
continue;
data[test_index] = hns3_lp_up(ndev, loop_type);
if (!data[test_index]) {
if (!data[test_index])
data[test_index] = hns3_lp_run_test(ndev, loop_type);
hns3_lp_down(ndev, loop_type);
}
hns3_lp_down(ndev, loop_type);
if (data[test_index])
eth_test->flags |= ETH_TEST_FL_FAILED;
......@@ -341,11 +341,6 @@ static void hns3_self_test(struct net_device *ndev,
clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
if (dis_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, true);
#endif
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
}
......@@ -365,9 +360,10 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
case ETH_SS_TEST:
return ops->get_sset_count(h, stringset);
}
return 0;
default:
return -EOPNOTSUPP;
}
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
......@@ -383,7 +379,7 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_",
prefix, i);
n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
size_left = (ETH_GSTRING_LEN - 1) - n1;
......@@ -431,6 +427,8 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_TEST:
ops->get_strings(h, stringset, data);
break;
default:
break;
}
}
......@@ -527,6 +525,11 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev)) {
netdev_err(netdev, "dev resetting!");
return;
}
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
......@@ -556,30 +559,71 @@ static int hns3_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
static void hns3_get_ksettings(struct hnae3_handle *h,
struct ethtool_link_ksettings *cmd)
{
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
/* 1.auto_neg & speed & duplex from cmd */
if (ops->get_ksettings_an_result)
ops->get_ksettings_an_result(h,
&cmd->base.autoneg,
&cmd->base.speed,
&cmd->base.duplex);
/* 2.get link mode*/
if (ops->get_link_mode)
ops->get_link_mode(h,
cmd->link_modes.supported,
cmd->link_modes.advertising);
/* 3.mdix_ctrl&mdix get from phy reg */
if (ops->get_mdix_mode)
ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
&cmd->base.eth_tp_mdix);
}
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
u32 flowctrl_adv = 0;
const struct hnae3_ae_ops *ops;
u8 media_type;
u8 link_stat;
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
/* 1.auto_neg & speed & duplex from cmd */
if (netdev->phydev) {
phy_ethtool_ksettings_get(netdev->phydev, cmd);
ops = h->ae_algo->ops;
if (ops->get_media_type)
ops->get_media_type(h, &media_type);
else
return -EOPNOTSUPP;
switch (media_type) {
case HNAE3_MEDIA_TYPE_NONE:
cmd->base.port = PORT_NONE;
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_FIBER:
cmd->base.port = PORT_FIBRE;
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_COPPER:
cmd->base.port = PORT_TP;
if (!netdev->phydev)
hns3_get_ksettings(h, cmd);
else
phy_ethtool_ksettings_get(netdev->phydev, cmd);
break;
default:
netdev_warn(netdev, "Unknown media type");
return 0;
}
if (h->ae_algo->ops->get_ksettings_an_result)
h->ae_algo->ops->get_ksettings_an_result(h,
&cmd->base.autoneg,
&cmd->base.speed,
&cmd->base.duplex);
else
return -EOPNOTSUPP;
/* mdio_support */
cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
link_stat = hns3_get_link(netdev);
if (!link_stat) {
......@@ -587,42 +631,16 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
/* 2.get link mode and port type*/
if (h->ae_algo->ops->get_link_mode)
h->ae_algo->ops->get_link_mode(h,
cmd->link_modes.supported,
cmd->link_modes.advertising);
cmd->base.port = PORT_NONE;
if (h->ae_algo->ops->get_port_type)
h->ae_algo->ops->get_port_type(h,
&cmd->base.port);
/* 3.mdix_ctrl&mdix get from phy reg */
if (h->ae_algo->ops->get_mdix_mode)
h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
&cmd->base.eth_tp_mdix);
/* 4.mdio_support */
cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
/* 5.get flow control setttings */
if (h->ae_algo->ops->get_flowctrl_adv)
h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
if (flowctrl_adv & ADVERTISED_Pause)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
if (flowctrl_adv & ADVERTISED_Asym_Pause)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
return 0;
}
static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
/* Chip don't support this mode. */
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev)
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
......@@ -671,12 +689,13 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
/* currently we only support Toeplitz hash */
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) {
netdev_err(netdev,
"hash func not supported (only Toeplitz hash)\n");
if ((h->pdev->revision == HNAE3_REVISION_ID_20 &&
hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
netdev_err(netdev, "hash func not supported\n");
return -EOPNOTSUPP;
}
if (!indir) {
netdev_err(netdev,
"set rss failed for indir is empty\n");
......@@ -692,32 +711,49 @@ static int hns3_get_rxnfc(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple)
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.rss_size;
break;
cmd->data = h->kinfo.num_tqps;
return 0;
case ETHTOOL_GRXFH:
return h->ae_algo->ops->get_rss_tuple(h, cmd);
if (h->ae_algo->ops->get_rss_tuple)
return h->ae_algo->ops->get_rss_tuple(h, cmd);
return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRULE:
if (h->ae_algo->ops->get_fd_rule_info)
return h->ae_algo->ops->get_fd_rule_info(h, cmd);
return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRLALL:
if (h->ae_algo->ops->get_fd_all_rules)
return h->ae_algo->ops->get_fd_all_rules(h, cmd,
rule_locs);
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
u32 new_desc_num)
u32 tx_desc_num, u32 rx_desc_num)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
h->kinfo.num_desc = new_desc_num;
h->kinfo.num_tx_desc = tx_desc_num;
h->kinfo.num_rx_desc = rx_desc_num;
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
priv->ring_data[i].ring->desc_num = new_desc_num;
for (i = 0; i < h->kinfo.num_tqps; i++) {
priv->ring_data[i].ring->desc_num = tx_desc_num;
priv->ring_data[(u32)(i + h->kinfo.num_tqps)].ring->desc_num =
rx_desc_num;
}
return hns3_init_all_ring(priv);
}
......@@ -728,49 +764,52 @@ static int hns3_set_ringparam(struct net_device *ndev,
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
bool if_running = netif_running(ndev);
u32 old_desc_num, new_desc_num;
u32 old_tx_desc_num, new_tx_desc_num;
u32 old_rx_desc_num, new_rx_desc_num;
int queue_num = h->kinfo.num_tqps;
int ret;
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
if (hns3_nic_resetting(ndev))
return -EBUSY;
if (param->tx_pending != param->rx_pending) {
netdev_err(ndev,
"Descriptors of tx and rx must be equal");
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
}
if (param->tx_pending > HNS3_RING_MAX_PENDING ||
param->tx_pending < HNS3_RING_MIN_PENDING) {
netdev_err(ndev,
"Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n",
param->tx_pending, HNS3_RING_MIN_PENDING,
HNS3_RING_MAX_PENDING);
param->tx_pending < HNS3_RING_MIN_PENDING ||
param->rx_pending > HNS3_RING_MAX_PENDING ||
param->rx_pending < HNS3_RING_MIN_PENDING) {
netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
return -EINVAL;
}
new_desc_num = param->tx_pending;
/* Hardware requires that its descriptors must be multiple of eight */
new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE);
old_desc_num = h->kinfo.num_desc;
if (old_desc_num == new_desc_num)
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
old_tx_desc_num = priv->ring_data[0].ring->desc_num;
old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
if (old_tx_desc_num == new_tx_desc_num &&
old_rx_desc_num == new_rx_desc_num)
return 0;
netdev_info(ndev,
"Changing descriptor count from %d to %d.\n",
old_desc_num, new_desc_num);
"Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
old_tx_desc_num, old_rx_desc_num,
new_tx_desc_num, new_rx_desc_num);
if (if_running)
dev_close(ndev);
ndev->netdev_ops->ndo_stop(ndev);
ret = hns3_uninit_all_ring(priv);
if (ret)
return ret;
ret = hns3_change_all_ring_bd_num(priv, new_desc_num);
ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num,
new_rx_desc_num);
if (ret) {
ret = hns3_change_all_ring_bd_num(priv, old_desc_num);
ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
old_rx_desc_num);
if (ret) {
netdev_err(ndev,
"Revert to old bd num fail, ret=%d.\n", ret);
......@@ -779,7 +818,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
if (if_running)
ret = dev_open(ndev);
ret = ndev->netdev_ops->ndo_open(ndev);
return ret;
}
......@@ -788,12 +827,22 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
return h->ae_algo->ops->set_rss_tuple(h, cmd);
if (h->ae_algo->ops->set_rss_tuple)
return h->ae_algo->ops->set_rss_tuple(h, cmd);
return -EOPNOTSUPP;
case ETHTOOL_SRXCLSRLINS:
if (h->ae_algo->ops->add_fd_entry)
return h->ae_algo->ops->add_fd_entry(h, cmd);
return -EOPNOTSUPP;
case ETHTOOL_SRXCLSRLDEL:
if (h->ae_algo->ops->del_fd_entry)
return h->ae_algo->ops->del_fd_entry(h, cmd);
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
......@@ -833,6 +882,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev))
return -EBUSY;
if (queue >= queue_num) {
netdev_err(netdev,
"Invalid queue value %d! Queue max id=%d\n",
......@@ -994,6 +1046,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
int ret;
int i;
if (hns3_nic_resetting(netdev))
return -EBUSY;
ret = hns3_check_coalesce_para(netdev, cmd);
if (ret)
return ret;
......@@ -1039,7 +1094,7 @@ static int hns3_set_phys_id(struct net_device *netdev,
return h->ae_algo->ops->set_led_id(h, state);
}
static const struct ethtool_ops hns3vf_ethtool_ops = {
struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
......@@ -1047,6 +1102,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
.set_rxnfc = hns3_set_rxnfc,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
......@@ -1056,9 +1112,11 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_coalesce = hns3_get_coalesce,
.set_coalesce = hns3_set_coalesce,
.get_link = hns3_get_link,
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
};
static const struct ethtool_ops hns3_ethtool_ops = {
struct ethtool_ops hns3_ethtool_ops = {
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
......
......@@ -6,6 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_debugfs.o hclge_err.o ../kcompat.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
......@@ -24,15 +24,15 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
return ring->desc_num - used - 1;
}
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
int u = ring->next_to_use;
int c = ring->next_to_clean;
int ntu = ring->next_to_use;
int ntc = ring->next_to_clean;
if (unlikely(h >= ring->desc_num))
return 0;
if (ntu > ntc)
return head >= ntc && head <= ntu;
return u > c ? (h > c && h <= u) : (h > c || h <= u);
return head >= ntc || head <= ntu;
}
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
......@@ -147,9 +147,6 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
if (!is_valid_csq_clean_head(csq, head)) {
dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
dev_warn(&hdev->pdev->dev,
"IMP firmware watchdog reset soon expected!\n");
return -EIO;
......@@ -171,8 +168,12 @@ static bool hclge_is_special_opcode(u16 opcode)
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
HCLGE_OPC_STATS_32_BIT,
HCLGE_OPC_STATS_MAC,
HCLGE_OPC_STATS_MAC_ALL,
HCLGE_OPC_QUERY_32_BIT_REG,
HCLGE_OPC_QUERY_64_BIT_REG};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
......@@ -260,6 +261,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
retval = 0;
else if (desc_ret == HCLGE_CMD_NO_AUTH)
retval = -EPERM;
else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
retval = -EOPNOTSUPP;
else
retval = -EIO;
hw->cmq.last_status = desc_ret;
......@@ -350,11 +355,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hdev->hw.cmq.crq.next_to_use = 0;
hclge_cmd_init_regs(&hdev->hw);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
/* Check if there is new reset pending, because the higher level
* reset may happen when lower level reset is being processed.
*/
if ((hclge_is_reset_pending(hdev))) {
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
return -EBUSY;
}
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
......
......@@ -39,7 +39,7 @@ struct hclge_cmq_ring {
enum hclge_cmd_return_status {
HCLGE_CMD_EXEC_SUCCESS = 0,
HCLGE_CMD_NO_AUTH = 1,
HCLGE_CMD_NOT_EXEC = 2,
HCLGE_CMD_NOT_SUPPORTED = 2,
HCLGE_CMD_QUEUE_FULL = 3,
};
......@@ -82,20 +82,40 @@ enum hclge_opcode_type {
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
HCLGE_OPC_STATS_MAC_ALL = 0x0034,
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
HCLGE_OPC_DFX_BD_NUM = 0x0043,
HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
HCLGE_OPC_DFX_NCSI_REG = 0x004A,
HCLGE_OPC_DFX_RTC_REG = 0x004B,
HCLGE_OPC_DFX_PPP_REG = 0x004C,
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
HCLGE_OPC_QUERY_AN_RESULT = 0x0306,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
/* check sum command */
HCLGE_OPC_CFG_CHECKSUM_EN = 0x0601,
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
......@@ -126,6 +146,16 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
HCLGE_OPC_QSET_DFX_STS = 0x0844,
HCLGE_OPC_PRI_DFX_STS = 0x0845,
HCLGE_OPC_PG_DFX_STS = 0x0846,
HCLGE_OPC_PORT_DFX_STS = 0x0847,
HCLGE_OPC_SCH_NQ_CNT = 0x0848,
HCLGE_OPC_SCH_RQ_CNT = 0x0849,
HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
......@@ -142,6 +172,7 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
......@@ -152,6 +183,7 @@ enum hclge_opcode_type {
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
......@@ -175,21 +207,22 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
/* Multicast linear table commands */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
/* Flow Director command */
HCLGE_OPC_FD_MODE_CTRL = 0x1200,
HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
HCLGE_OPC_FD_TCAM_OP = 0x1203,
HCLGE_OPC_FD_AD_OP = 0x1204,
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
......@@ -208,6 +241,36 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
/* SFP command */
HCLGE_OPC_SFP_GET_SPEED = 0x7104,
/* Error INT commands */
HCLGE_MAC_COMMON_INT_EN = 0x030E,
HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
HCLGE_SSU_ECC_INT_CMD = 0x0989,
HCLGE_SSU_COMMON_INT_CMD = 0x098C,
HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
HCLGE_COMMON_ECC_INT_CFG = 0x1505,
HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
HCLGE_PPP_CMD0_INT_CMD = 0x2100,
HCLGE_PPP_CMD1_INT_CMD = 0x2101,
HCLGE_PPP_MAC_VLAN_IDX_RD = 0x2104,
HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
HCLGE_NCSI_INT_EN = 0x2401,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
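The new DFX, SFP, and error-interrupt opcodes above are driven through the same single-descriptor pattern used by the rest of this file. A hedged usage sketch, assuming hclge_cmd_setup_basic_desc(desc, opcode, is_read) initializes and zeroes the descriptor as the existing helper of that name does:

/* Hedged sketch: issue one read-type query, e.g. HCLGE_OPC_DFX_BD_NUM,
 * and leave the response in desc->data for the caller to parse.
 */
static int hclge_query_one_desc(struct hclge_dev *hdev,
				enum hclge_opcode_type opcode,
				struct hclge_desc *desc)
{
	hclge_cmd_setup_basic_desc(desc, opcode, true);

	return hclge_cmd_send(&hdev->hw, desc, 1);
}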
......@@ -365,7 +428,9 @@ struct hclge_pf_res_cmd {
#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
__le16 pf_intr_vector_number;
__le16 pf_own_fun_number;
__le32 rsv[3];
__le16 tx_buf_size;
__le16 dv_buf_size;
__le32 rsv[2];
};
#define HCLGE_CFG_OFFSET_S 0
......@@ -395,6 +460,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
struct hclge_cfg_param_cmd {
__le32 offset;
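The new HCLGE_CFG_UMV_TBL_SPACE_S/M pair describes where the unicast MAC/VLAN (UMV) table size sits inside one of the configuration words read from firmware. A hedged sketch of extracting it, assuming the word has already been converted to CPU endianness and hnae3_get_field() keeps its usual (value, mask, shift) semantics:

/* Hedged sketch: pull the UMV table space out of a CPU-endian config word. */
static u16 hclge_get_umv_space(u32 cfg_word)
{
	return hnae3_get_field(cfg_word, HCLGE_CFG_UMV_TBL_SPACE_M,
			       HCLGE_CFG_UMV_TBL_SPACE_S);
}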
......@@ -517,20 +584,6 @@ struct hclge_config_mac_speed_dup_cmd {
u8 rsv[22];
};
#define HCLGE_QUERY_SPEED_S 3
#define HCLGE_QUERY_AN_B 0
#define HCLGE_QUERY_DUPLEX_B 2
#define HCLGE_QUERY_SPEED_M GENMASK(4, 0)
#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B)
#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B)
struct hclge_query_an_speed_dup_cmd {
u8 an_syn_dup_speed;
u8 pause;
u8 rsv[23];
};
#define HCLGE_RING_ID_MASK GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B 0
......@@ -547,6 +600,11 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
struct hclge_sfp_speed_cmd {
__le32 sfp_speed;
u32 rsv[5];
};
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
......@@ -584,13 +642,12 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
#define HCLGE_VLAN_MASK_EN_B 0
struct hclge_mac_vlan_mask_entry_cmd {
u8 rsv0[2];
u8 vlan_mask;
u8 rsv1;
u8 mac_mask[6];
u8 rsv2[14];
#define HCLGE_UMV_SPC_ALC_B 0
struct hclge_umv_spc_alc_cmd {
u8 allocate;
u8 rsv1[3];
__le32 space_size;
u8 rsv2[16];
};
#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
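struct hclge_umv_spc_alc_cmd replaces the removed MAC/VLAN mask entry and lets the PF claim or release private UMV table space. A hedged usage sketch, assuming that setting bit HCLGE_UMV_SPC_ALC_B marks a release rather than an allocation, and that HCLGE_OPC_MAC_VLAN_ALLOCATE from the opcode list is the matching command:

/* Hedged sketch: request `size` private UMV entries, or release them when
 * is_alloc is false; the bit meaning and opcode pairing are assumptions.
 */
static int hclge_set_umv_space_alloc(struct hclge_dev *hdev, u16 size,
				     bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	if (!is_alloc)
		req->allocate |= BIT(HCLGE_UMV_SPC_ALC_B);
	req->space_size = cpu_to_le32(size);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}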
......@@ -615,30 +672,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
#define HCLGE_CFG_MTA_MAC_SEL_S 0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
#define HCLGE_CFG_MTA_MAC_EN_B 7
struct hclge_mta_filter_mode_cmd {
u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */
u8 rsv[23];
};
#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0
struct hclge_cfg_func_mta_filter_cmd {
u8 accept; /* Only used lowest 1 bit */
u8 function_id;
u8 rsv[22];
};
#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0
#define HCLGE_CFG_MTA_ITEM_IDX_S 0
#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
struct hclge_cfg_func_mta_item_cmd {
__le16 item_idx; /* Only used lowest 12 bit */
u8 accept; /* Only used lowest 1 bit */
u8 rsv[21];
};
struct hclge_mac_vlan_add_cmd {
__le16 flags;
__le16 mac_addr_hi16;
......@@ -664,6 +697,38 @@ struct hclge_mac_vlan_remove_cmd {
u8 rsv[4];
};
#pragma pack(1)
struct hclge_mac_vlan_idx_rd_cmd {
u8 rsv0;
u8 resp_code;
__le16 vlan_tag;
u8 mac_add[6];
__le16 port;
u8 entry_type;
u8 mc_mac_en;
__le16 egress_port;
__le16 egress_queue;
__le16 vsi;
__le32 index;
};
struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
u8 mac_add[6];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
__le16 egress_queue;
__le16 rev0;
u8 i_port_bitmap;
u8 i_port_direction;
u8 rev1[2];
};
#pragma pack()
struct hclge_vlan_filter_ctrl_cmd {
u8 vlan_type;
u8 vlan_fe;
......@@ -758,6 +823,12 @@ struct hclge_cfg_tso_status_cmd {
u8 rsv[20];
};
#define HCLGE_GRO_EN_B 0
struct hclge_cfg_gro_status_cmd {
__le16 gro_en;
u8 rsv[22];
};
#define HCLGE_TSO_MSS_MIN 256
#define HCLGE_TSO_MSS_MAX 9668
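struct hclge_cfg_gro_status_cmd pairs with the HCLGE_OPC_GRO_GENERIC_CONFIG opcode added earlier to switch hardware GRO on or off, mirroring the TSO config command just above. A hedged sketch of the corresponding call:

/* Hedged sketch: enable or disable hardware GRO via the command above. */
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
	req->gro_en = cpu_to_le16(en ? 1 : 0);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}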
......@@ -778,6 +849,7 @@ struct hclge_reset_cmd {
};
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
#define HCLGE_CMD_SERDES_DONE_B BIT(0)
#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
struct hclge_serdes_lb_cmd {
......@@ -791,6 +863,7 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x200 /* 512 byte */
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
......@@ -818,6 +891,76 @@ struct hclge_set_led_state_cmd {
u8 rsv2[20];
};
struct hclge_get_fd_mode_cmd {
u8 mode;
u8 enable;
u8 rsv[22];
};
struct hclge_get_fd_allocation_cmd {
__le32 stage1_entry_num;
__le32 stage2_entry_num;
__le16 stage1_counter_num;
__le16 stage2_counter_num;
u8 rsv[12];
};
struct hclge_set_fd_key_config_cmd {
u8 stage;
u8 key_select;
u8 inner_sipv6_word_en;
u8 inner_dipv6_word_en;
u8 outer_sipv6_word_en;
u8 outer_dipv6_word_en;
u8 rsv1[2];
__le32 tuple_mask;
__le32 meta_data_mask;
u8 rsv2[8];
};
#define HCLGE_FD_EPORT_SW_EN_B 0
struct hclge_fd_tcam_config_1_cmd {
u8 stage;
u8 xy_sel;
u8 port_info;
u8 rsv1[1];
__le32 index;
u8 entry_vld;
u8 rsv2[7];
u8 tcam_data[8];
};
struct hclge_fd_tcam_config_2_cmd {
u8 tcam_data[24];
};
struct hclge_fd_tcam_config_3_cmd {
u8 tcam_data[20];
u8 rsv[4];
};
#define HCLGE_FD_AD_DROP_B 0
#define HCLGE_FD_AD_DIRECT_QID_B 1
#define HCLGE_FD_AD_QID_S 2
#define HCLGE_FD_AD_QID_M GENMASK(12, 2)
#define HCLGE_FD_AD_USE_COUNTER_B 12
#define HCLGE_FD_AD_COUNTER_NUM_S 13
#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
#define HCLGE_FD_AD_NXT_STEP_B 20
#define HCLGE_FD_AD_NXT_KEY_S 21
#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21)
#define HCLGE_FD_AD_WR_RULE_ID_B 0
#define HCLGE_FD_AD_RULE_ID_S 1
#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1)
struct hclge_fd_ad_config_cmd {
u8 stage;
u8 rsv1[3];
__le32 index;
__le64 ad_data;
u8 rsv2[8];
};
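The HCLGE_FD_AD_* bits describe how a flow-director "action data" word is packed before being written through struct hclge_fd_ad_config_cmd. Because HCLGE_FD_AD_DROP_B and HCLGE_FD_AD_WR_RULE_ID_B both sit at bit 0, the rule-id group presumably occupies the other 32-bit half of ad_data; the sketch below builds the word under that assumption, using the hnae3_set_bit()/hnae3_set_field() helpers from hnae3.h:

/* Hedged sketch: pack an action word that steers matches to queue `qid`
 * and records `rule_id`; the 32-bit split between the rule-id group and
 * the drop/queue/counter group is an assumption.
 */
static u64 hclge_fd_pack_ad_data(u16 qid, u16 rule_id)
{
	u64 ad_data = 0;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, 1);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			rule_id);
	ad_data <<= 32;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, 1);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, qid);

	return ad_data;
}

The caller would then store the result as cpu_to_le64(ad_data) in the ad_data field of struct hclge_fd_ad_config_cmd.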
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
......
This diff has been collapsed.
This diff has been collapsed.
......@@ -5,8 +5,8 @@
#define __HCLGE_MDIO_H
int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hclge_dev *hdev);
void hclge_mac_disconnect_phy(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
......
This diff has been collapsed.
This diff has been collapsed.