Commit 4645e3f4 authored by Yang Yingliang, committed by Xie XiuQi

driver: hns3: update hns3 driver from driver team

driver inclusion
category: feature

-----------------------------------------

Based on add763cbef9424c6ea624dce6d6d2d51048cf9da
("net: hns3: Reduce resources use in kdump kernel")
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent bea163a4
@@ -44,6 +44,7 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_LINK_STAT_MODE,	/* (PF -> VF) link mode has changed */
 	HCLGE_MBX_GET_LINK_MODE,	/* (VF -> PF) get the link mode of pf */
 	HLCGE_MBX_PUSH_VLAN_INFO,	/* (PF -> VF) push port base vlan */
+	HCLGE_MBX_GET_MEDIA_TYPE,	/* (VF -> PF) get media type */
 	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
 };
@@ -68,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
 };
 
 #define HCLGE_MBX_MAX_MSG_SIZE		16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE	16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8
 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM	3
 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM	3
@@ -86,6 +87,7 @@ struct hclge_mbx_vf_to_pf_cmd {
 	u8 rsv1[2];
 	u8 msg_len;
 	u8 rsv2[3];
+	/* msg[0] means opcode and msg[1] means sub opcode, other is msg data */
 	u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
 };
@@ -94,6 +96,7 @@ struct hclge_mbx_pf_to_vf_cmd {
 	u8 rsv[3];
 	u8 msg_len;
 	u8 rsv1[3];
+	/* msg[0] means OPCODE, other is msg data */
 	u16 msg[8];
 };
......
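The new HCLGE_MBX_GET_MEDIA_TYPE opcode gives a VF a way to ask its PF for the port's media type over the mailbox. The VF-side handler is not part of the hunks shown here; the following is only a sketch of how such a query would look, assuming the driver's existing hclgevf_send_mbx_msg() helper and its (code, subcode, data, len, need_resp, resp, resp_len) signature from this era of the driver:

	/* Sketch only: VF queries the media type via the new mailbox opcode.
	 * hclgevf_send_mbx_msg() waits for the PF response when need_resp
	 * is true and copies it into resp_msg.
	 */
	static int hclgevf_get_media_type_sketch(struct hclgevf_dev *hdev,
						 u8 *media_type)
	{
		u8 resp_msg;
		int ret;

		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0,
					   NULL, 0, true, &resp_msg,
					   sizeof(resp_msg));
		if (ret)
			return ret;

		*media_type = resp_msg;
		return 0;
	}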
@@ -32,6 +32,9 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
 void hnae3_set_client_init_flag(struct hnae3_client *client,
 				struct hnae3_ae_dev *ae_dev, int inited)
 {
+	if (!client || !ae_dev)
+		return;
+
 	switch (client->type) {
 	case HNAE3_CLIENT_KNIC:
 		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
@@ -109,6 +112,9 @@ int hnae3_register_client(struct hnae3_client *client)
 	struct hnae3_ae_dev *ae_dev;
 	int ret = 0;
 
+	if (!client)
+		return -ENODEV;
+
 	mutex_lock(&hnae3_common_lock);
 	/* one system should only have one client for every type */
 	list_for_each_entry(client_tmp, &hnae3_client_list, node) {
@@ -141,6 +147,9 @@ void hnae3_unregister_client(struct hnae3_client *client)
 {
 	struct hnae3_ae_dev *ae_dev;
 
+	if (!client)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* un-initialize the client on every matched port */
 	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
@@ -163,6 +172,9 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 	struct hnae3_client *client;
 	int ret = 0;
 
+	if (!ae_algo)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 
 	list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);
@@ -173,8 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 		if (!id)
 			continue;
 
-		/* ae_dev init should set flag */
+		if (!ae_algo->ops) {
+			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+			continue;
+		}
 		ae_dev->ops = ae_algo->ops;
+
 		ret = ae_algo->ops->init_ae_dev(ae_dev);
 		if (ret) {
 			dev_err(&ae_dev->pdev->dev,
@@ -182,6 +198,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 			continue;
 		}
 
+		/* ae_dev init should set flag */
 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 
 		/* check the client list for the match with this ae_dev type and
@@ -209,6 +226,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
 	struct hnae3_ae_dev *ae_dev;
 	struct hnae3_client *client;
 
+	if (!ae_algo)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* Check if there are matched ae_dev */
 	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
@@ -245,6 +265,9 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 	struct hnae3_client *client;
 	int ret = 0;
 
+	if (!ae_dev)
+		return -ENODEV;
+
 	mutex_lock(&hnae3_common_lock);
 
 	list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
@@ -255,15 +278,13 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 		if (!id)
 			continue;
 
-		ae_dev->ops = ae_algo->ops;
-
-		if (!ae_dev->ops) {
-			dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+		if (!ae_algo->ops) {
+			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
 			ret = -EOPNOTSUPP;
 			goto out_err;
 		}
+		ae_dev->ops = ae_algo->ops;
 
-		/* ae_dev init should set flag */
 		ret = ae_dev->ops->init_ae_dev(ae_dev);
 		if (ret) {
 			dev_err(&ae_dev->pdev->dev,
@@ -271,6 +292,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 			goto out_err;
 		}
 
+		/* ae_dev init should set flag */
 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 		break;
 	}
@@ -307,6 +329,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
 	struct hnae3_ae_algo *ae_algo;
 	struct hnae3_client *client;
 
+	if (!ae_dev)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* Check if there are matched ae_algo */
 	list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
......
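All of the hnae3 register/unregister entry points above now reject a NULL argument instead of dereferencing it, which hardens the module init/exit paths of the client and algo drivers that call them. For context, a hedged sketch of the usual caller pattern (the module and ops-table names here are illustrative, not from this commit):

	/* Sketch only: a KNIC client registering with the hnae3 framework.
	 * With the new guard, a NULL client now returns -ENODEV cleanly.
	 */
	static struct hnae3_client example_client = {
		.name = "hns3_example",
		.type = HNAE3_CLIENT_KNIC,
		.ops  = &example_client_ops,	/* hypothetical ops table */
	};

	static int __init hns3_example_init(void)
	{
		return hnae3_register_client(&example_client);
	}

	static void __exit hns3_example_exit(void)
	{
		hnae3_unregister_client(&example_client);
	}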
@@ -4,8 +4,7 @@
 #include "hnae3.h"
 #include "hns3_enet.h"
 
-static
-int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
 	return -EOPNOTSUPP;
 }
 
-static
-int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
 	return -EOPNOTSUPP;
 }
 
-static
-int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 	return -EOPNOTSUPP;
 }
 
-static
-int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
......
@@ -146,8 +146,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
 		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
 
 		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
-				  tqp_vectors->name,
-				  tqp_vectors);
+				  tqp_vectors->name, tqp_vectors);
 		if (ret) {
 			netdev_err(priv->netdev, "request irq(%d) fail\n",
 				   tqp_vectors->vector_irq);
@@ -290,8 +289,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 	ret = netif_set_real_num_tx_queues(netdev, queue_size);
 	if (ret) {
 		netdev_err(netdev,
-			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
-			   ret);
+			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
 		return ret;
 	}
@@ -347,7 +345,7 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	/* get irq resource for all vectors */
 	ret = hns3_nic_init_irq(priv);
 	if (ret) {
-		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
+		netdev_err(netdev, "init irq failed! ret=%d\n", ret);
 		return ret;
 	}
@@ -422,16 +420,13 @@ static int hns3_nic_net_open(struct net_device *netdev)
 	ret = hns3_nic_net_up(netdev);
 	if (ret) {
 		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
-		netdev_err(netdev,
-			   "hns net up fail, ret=%d!\n", ret);
+		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
 		return ret;
 	}
 
 	kinfo = &h->kinfo;
-	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
-		netdev_set_prio_tc_map(netdev, i,
-				       kinfo->prio_tc[i]);
-	}
+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+		netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
 
 	if (h->ae_algo->ops->enable_timer_task)
 		h->ae_algo->ops->enable_timer_task(priv->ae_handle, true);
@@ -635,7 +630,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 	if (l3.v4->version == 4)
 		l3.v4->check = 0;
 
-	/* tunnel packet.*/
+	/* tunnel packet */
 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
 					 SKB_GSO_GRE_CSUM |
 					 SKB_GSO_UDP_TUNNEL |
@@ -665,11 +660,11 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 			l3.v4->check = 0;
 	}
 
-	/* normal or tunnel packet*/
+	/* normal or tunnel packet */
 	l4_offset = l4.hdr - skb->data;
 	hdr_len = (l4.tcp->doff << 2) + l4_offset;
 
-	/* remove payload length from inner pseudo checksum when tso*/
+	/* remove payload length from inner pseudo checksum when tso */
 	l4_paylen = skb->len - l4_offset;
 	csum_replace_by_diff(&l4.tcp->check,
 			     (__force __wsum)htonl(l4_paylen));
@@ -757,7 +752,7 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 	l2_len = l3.hdr - skb->data;
 	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
 
-	/* tunnel packet*/
+	/* tunnel packet */
 	if (skb->encapsulation) {
 		/* compute OL2 header size, defined in 2 Bytes */
 		ol2_len = l2_len;
@@ -769,9 +764,9 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
 			       ol3_len >> 2);
 
-		/* MAC in UDP, MAC in GRE (0x6558)*/
+		/* MAC in UDP, MAC in GRE (0x6558) */
 		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
-			/* switch MAC header ptr from outer to inner header.*/
+			/* switch MAC header ptr from outer to inner header */
 			l2_hdr = skb_inner_mac_header(skb);
 
 			/* compute OL4 header size, defined in 4 Bytes. */
@@ -893,9 +888,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 	l3.hdr = skb_network_header(skb);
 
-	/* define OL3 type and tunnel type(OL4).*/
+	/* define OL3 type and tunnel type(OL4) */
 	if (skb->encapsulation) {
-		/* define outer network header type.*/
+		/* define outer network header type */
 		if (skb->protocol == htons(ETH_P_IP)) {
 			if (skb_is_gso(skb))
 				hns3_set_field(*ol_type_vlan_len_msec,
@@ -911,7 +906,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 				       HNS3_OL3T_IPV6);
 		}
 
-		/* define tunnel type(OL4).*/
+		/* define tunnel type(OL4) */
 		switch (l4_proto) {
 		case IPPROTO_UDP:
 			hns3_set_field(*ol_type_vlan_len_msec,
@@ -1081,8 +1076,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		/* Set txbd */
 		desc->tx.ol_type_vlan_len_msec =
 			cpu_to_le32(ol_type_vlan_len_msec);
-		desc->tx.type_cs_vlan_tso_len =
-			cpu_to_le32(type_cs_vlan_tso);
+		desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
 		desc->tx.paylen = cpu_to_le32(paylen);
 		desc->tx.mss = cpu_to_le16(mss);
 		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
@@ -1094,7 +1088,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
 	}
 
-	if (unlikely(dma_mapping_error(ring->dev, dma))) {
+	if (unlikely(dma_mapping_error(dev, dma))) {
 		ring->stats.sw_err_cnt++;
 		return -ENOMEM;
 	}
@@ -1123,7 +1117,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc->tx.bdtp_fe_sc_vld_ra_ri =
 			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
 
-		/* move ring pointer to next.*/
+		/* move ring pointer to next */
 		ring_ptr_move_fw(ring, next_to_use);
 
 		desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -1673,7 +1667,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
-	struct hns3_enet_ring *tx_ring = NULL;
+	struct hns3_enet_ring *tx_ring;
 	int timeout_queue = 0;
 	int hw_head, hw_tail;
 	int i;
@@ -1816,8 +1810,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct hnae3_ae_dev *ae_dev;
 	int ret;
 
-	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
-			      GFP_KERNEL);
+	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
 	if (!ae_dev) {
 		ret = -ENOMEM;
 		return ret;
@@ -2017,7 +2010,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
 
 	if (pdev->revision > HNAE3_REVISION_ID_20) {
-		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 #ifdef NETIF_F_GRO_HW
 		netdev->features |= NETIF_F_GRO_HW;
 		netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2125,8 +2117,7 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 	int size = ring->desc_num * sizeof(ring->desc[0]);
 
 	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
-					 &ring->desc_dma_addr,
-					 GFP_KERNEL);
+					 &ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
@@ -2198,8 +2189,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
 	ring->desc_cb[i].reuse_flag = 0;
-	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
-		+ ring->desc_cb[i].page_offset);
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;
 }
@@ -2210,7 +2201,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
 	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
 	(*bytes) += desc_cb->length;
 
-	/* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
+	/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
 	hns3_free_buffer_detach(ring, ring->next_to_clean);
 
 	ring_ptr_move_fw(ring, next_to_clean);
@@ -2292,8 +2283,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }
 
-static void
-hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
+static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+				      int cleand_count)
 {
 	struct hns3_desc_cb *desc_cb;
 	struct hns3_desc_cb res_cbs;
@@ -2332,48 +2323,30 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
 {
-	struct hns3_desc *desc;
-	int truesize, size;
-	int last_offset;
-	bool twobufs;
-
-	twobufs = ((PAGE_SIZE < 8192) &&
-		hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
-
-	desc = &ring->desc[ring->next_to_clean];
-	size = le16_to_cpu(desc->rx.size);
-	truesize = hnae3_buf_size(ring);
-
-	if (!twobufs)
-		last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
+	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+	int size = le16_to_cpu(desc->rx.size);
+	u32 truesize = hnae3_buf_size(ring);
 
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);
 
-	/* Avoid re-using remote pages,flag default unreuse */
-	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
-		return;
-
-	if (twobufs) {
-		/* If we are only owner of page we can reuse it */
-		if (likely(page_count(desc_cb->priv) == 1)) {
-			/* Flip page offset to other buffer */
-			desc_cb->page_offset ^= truesize;
-
-			desc_cb->reuse_flag = 1;
-			/* bump ref count on page before it is given*/
-			get_page(desc_cb->priv);
-		}
+	/* Avoid re-using remote pages, or the stack is still using the page
+	 * when page buffer has wrap back, flag default unreuse
+	 */
+	if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
+	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
 		return;
-	}
 
 	/* Move offset up to the next cache line */
 	desc_cb->page_offset += truesize;
 
-	if (desc_cb->page_offset <= last_offset) {
+	if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
 		desc_cb->reuse_flag = 1;
-		/* Bump ref count on page before it is given*/
-		get_page(desc_cb->priv);
-	} else if (page_count(desc_cb->priv) == 1) {
-		desc_cb->reuse_flag = 1;
-		desc_cb->page_offset = 0;
+		/* Bump ref count on page before it is given */
 		get_page(desc_cb->priv);
 	}
 }
@@ -2525,7 +2498,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
 	memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
 
 	/* We can reuse buffer as-is, just make sure it is local */
-	if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+	if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
 		desc_cb->reuse_flag = 1;
 	else /* This page cannot be reused so discard it */
 		put_page(desc_cb->priv);
@@ -2648,8 +2621,7 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
 	 */
 	NAPI_GRO_CB(skb)->count = gro_count;
 
-	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
-				  HNS3_RXD_L3ID_S);
+	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
 	if (l3_type == HNS3_L3_TYPE_IPV4)
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 	else if (l3_type == HNS3_L3_TYPE_IPV6)
@@ -2795,8 +2767,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 	return 0;
 }
 
-int hns3_clean_rx_ring(
-		struct hns3_enet_ring *ring, int budget,
-		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
+		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
@@ -2851,8 +2822,7 @@ int hns3_clean_rx_ring(
 out:
 	/* Make all data has been write before submit */
 	if (clean_count + unused_count > 0)
-		hns3_nic_alloc_rx_buffers(ring,
-					  clean_count + unused_count);
+		hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
 
 	return recv_pkts;
 }
@@ -3309,10 +3279,8 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
 
 		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
-			irq_set_affinity_notifier(tqp_vector->vector_irq,
-						  NULL);
-			irq_set_affinity_hint(tqp_vector->vector_irq,
-					      NULL);
+			irq_set_affinity_notifier(tqp_vector->vector_irq, NULL);
+			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
 			free_irq(priv->tqp_vector[i].vector_irq,
 				 &priv->tqp_vector[i]);
 			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3453,8 +3421,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 	if (ring->desc_num <= 0 || ring->buf_size <= 0)
 		return -EINVAL;
 
-	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
-				GFP_KERNEL);
+	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
+				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
 	if (!ring->desc_cb) {
 		ret = -ENOMEM;
 		goto out;
@@ -3475,7 +3443,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 out_with_desc:
 	hns3_free_desc(ring);
 out_with_desc_cb:
-	kfree(ring->desc_cb);
+	devm_kfree(ring_to_dev(ring), ring->desc_cb);
 	ring->desc_cb = NULL;
 out:
 	return ret;
@@ -3484,7 +3452,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 static void hns3_fini_ring(struct hns3_enet_ring *ring)
 {
 	hns3_free_desc(ring);
-	kfree(ring->desc_cb);
+	devm_kfree(ring_to_dev(ring), ring->desc_cb);
 	ring->desc_cb = NULL;
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
@@ -3525,8 +3493,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
 	struct hnae3_queue *q = ring->tqp;
 
 	if (!HNAE3_IS_TX_RING(ring)) {
-		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
-			       (u32)dma);
+		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
 			       (u32)((dma >> 31) >> 1));
@@ -3773,7 +3740,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	ret = hns3_client_start(handle);
 	if (ret) {
 		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
-		goto out_reg_netdev_fail;
+		goto out_client_start;
 	}
 
 	hns3_dcbnl_setup(handle);
@@ -3789,6 +3756,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	return ret;
 
+out_client_start:
+	unregister_netdev(netdev);
 out_reg_netdev_fail:
 	hns3_uninit_phy(netdev);
 out_init_phy:
@@ -3958,8 +3927,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
 					    ret);
 				return ret;
 			}
-			hns3_replace_buffer(ring, ring->next_to_use,
-					    &res_cbs);
+			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
 		}
 		ring_ptr_move_fw(ring, next_to_use);
 	}
@@ -4130,7 +4098,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
 		ret = hns3_nic_net_open(kinfo->netdev);
 		if (ret) {
 			netdev_err(kinfo->netdev,
-				   "hns net up fail, ret=%d!\n", ret);
+				   "net up fail, ret=%d!\n", ret);
 			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
 			return ret;
 		}
@@ -4171,6 +4139,12 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 	if (ret)
 		goto err_uninit_vector;
 
+	ret = hns3_client_start(handle);
+	if (ret) {
+		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+		goto err_uninit_vector;
+	}
+
 	set_bit(HNS3_NIC_STATE_INITED, &priv->state);
 
 	return 0;
......
@@ -398,7 +398,6 @@ struct hns3_enet_ring {
 	struct hns3_enet_ring *next;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	struct hnae3_queue *tqp;
-	char ring_name[HNS3_RING_NAME_LEN];
 	struct device *dev; /* will be used for DMA mapping of descriptors */
 
 	/* statistic */
@@ -408,9 +407,6 @@ struct hns3_enet_ring {
 	dma_addr_t desc_dma_addr;
 	u32 buf_size;	/* size for hnae_desc->addr, preset by AE */
 	u16 desc_num;	/* total number of desc */
-	u16 max_desc_num_per_pkt;
-	u16 max_raw_data_sz_per_desc;
-	u16 max_pkt_size;
 	int next_to_use;	/* idx of next spare desc */
 
 	/* idx of lastest sent desc, the ring is empty when equal to
@@ -424,9 +420,6 @@ struct hns3_enet_ring {
 	u32 flag;	/* ring attribute */
 
-	int numa_node;
-	cpumask_t affinity_mask;
-
 	int pending_buf;
 	struct sk_buff *skb;
 	struct sk_buff *tail_skb;
@@ -634,7 +627,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 #define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
 		(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
 
-#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
+#define ring_to_dev(ring) ((ring)->dev)
 
 #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
 	DMA_TO_DEVICE : DMA_FROM_DEVICE)
......
@@ -58,6 +58,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
 #define HNS3_NIC_LB_TEST_PKT_NUM	1
 #define HNS3_NIC_LB_TEST_RING_ID	0
 #define HNS3_NIC_LB_TEST_PACKET_SIZE	128
+#define HNS3_NIC_LB_SETUP_USEC		10000
 
 /* Nic loopback test err */
 #define HNS3_NIC_LB_TEST_NO_MEM_ERR	1
@@ -115,7 +116,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
 		return ret;
 
 	ret = hns3_lp_setup(ndev, loop_mode, true);
-	usleep_range(10000, 20000);
+	usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
 
 	return ret;
 }
@@ -130,7 +131,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
 		return ret;
 	}
 
-	usleep_range(10000, 20000);
+	usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
 
 	return 0;
 }
@@ -152,6 +153,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
 	packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
 
 	memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+
+	/* The dst mac addr of loopback packet is the same as the host'
+	 * mac addr, the SSU component may loop back the packet to host
+	 * before the packet reaches mac or serdes, which will defect
+	 * the purpose of mac or serdes selftest.
+	 */
 	ethh->h_dest[5] += 0x1f;
 	eth_zero_addr(ethh->h_source);
 	ethh->h_proto = htons(ETH_P_ARP);
......
@@ -209,12 +209,14 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw,
 			retval = -EPERM;
 		else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
 			retval = -EOPNOTSUPP;
+		else if (desc_ret == HCLGE_CMD_QUEUE_ILLEGAL)
+			retval = -ENXIO;
 		else
 			retval = -EIO;
 		hw->cmq.last_status = desc_ret;
 		(*ntc)++;
 		handle++;
-		if (*ntc == hw->cmq.csq.desc_num)
+		if (*ntc >= hw->cmq.csq.desc_num)
 			*ntc = 0;
 	}
 	return retval;
@@ -257,7 +259,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
 		*desc_to_use = desc[handle];
 		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
 			hw->cmq.csq.next_to_use = 0;
 		handle++;
 	}
@@ -393,6 +395,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
 	return 0;
 }
 
+static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
+{
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
+}
+
 static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
 {
 	spin_lock(&ring->lock);
@@ -405,3 +421,15 @@ void hclge_destroy_cmd_queue(struct hclge_hw *hw)
 	hclge_destroy_queue(&hw->cmq.csq);
 	hclge_destroy_queue(&hw->cmq.crq);
 }
+
+void hclge_cmd_uninit(struct hclge_dev *hdev)
+{
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock_bh(&hdev->hw.cmq.crq.lock);
+	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+	hclge_cmd_uninit_regs(&hdev->hw);
+	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+	hclge_destroy_cmd_queue(&hdev->hw);
+}
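hclge_cmd_uninit() deliberately takes both ring locks, sets HCLGE_STATE_CMD_DISABLE, and clears the CSQ/CRQ registers before freeing the rings, so no hclge_cmd_send() caller can touch the hardware mid-teardown. The matching change to the caller lives in hclge_main.c, which is not shown on this page; a sketch of the intended usage, with hypothetical function naming:

	/* Sketch only: PF teardown path switching from the old
	 * hclge_destroy_cmd_queue(&hdev->hw) call to hclge_cmd_uninit(),
	 * which disables the queue under both ring locks first.
	 */
	static void hclge_uninit_ae_dev_sketch(struct hnae3_ae_dev *ae_dev)
	{
		struct hclge_dev *hdev = ae_dev->priv;

		hclge_cmd_uninit(hdev);
	}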
@@ -41,6 +41,7 @@ enum hclge_cmd_return_status {
 	HCLGE_CMD_NO_AUTH	= 1,
 	HCLGE_CMD_NOT_SUPPORTED	= 2,
 	HCLGE_CMD_QUEUE_FULL	= 3,
+	HCLGE_CMD_QUEUE_ILLEGAL	= 10,
 };
 
 enum hclge_cmd_status {
@@ -318,16 +319,16 @@ struct hclge_ctrl_vector_chain_cmd {
 	u8 rsv;
 };
 
-#define HCLGE_TC_NUM		8
+#define HCLGE_MAX_TC_NUM	8
 #define HCLGE_TC0_PRI_BUF_EN_B	15 /* Bit 15 indicate enable or not */
 #define HCLGE_BUF_UNIT_S	7  /* Buf size is united by 128 bytes */
 struct hclge_tx_buff_alloc_cmd {
-	__le16 tx_pkt_buff[HCLGE_TC_NUM];
+	__le16 tx_pkt_buff[HCLGE_MAX_TC_NUM];
 	u8 tx_buff_rsv[8];
 };
 
 struct hclge_rx_priv_buff_cmd {
-	__le16 buf_num[HCLGE_TC_NUM];
+	__le16 buf_num[HCLGE_MAX_TC_NUM];
 	__le16 shared_buf;
 	u8 rsv[6];
 };
@@ -373,7 +374,6 @@ struct hclge_priv_buf {
 	u32 enable;	/* Enable TC private buffer or not */
 };
 
-#define HCLGE_MAX_TC_NUM	8
 struct hclge_shared_buf {
 	struct hclge_waterline self;
 	struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM];
@@ -620,6 +620,11 @@ enum hclge_mac_vlan_tbl_opcode {
 	HCLGE_MAC_VLAN_LKUP,	/* Lookup a entry through mac_vlan key */
 };
 
+enum hclge_mac_vlan_add_resp_code {
+	HCLGE_ADD_UC_OVERFLOW = 2,	/* ADD failed for UC overflow */
+	HCLGE_ADD_MC_OVERFLOW,		/* ADD failed for MC overflow */
+};
+
 #define HCLGE_MAC_VLAN_BIT0_EN_B	0
 #define HCLGE_MAC_VLAN_BIT1_EN_B	1
 #define HCLGE_MAC_EPORT_SW_EN_B		12
@@ -732,7 +737,9 @@ struct hclge_mac_ethertype_idx_rd_cmd {
 struct hclge_vlan_filter_ctrl_cmd {
 	u8 vlan_type;
 	u8 vlan_fe;
-	u8 rsv[22];
+	u8 rsv1[2];
+	u8 vf_id;
+	u8 rsv2[19];
 };
 
 struct hclge_vlan_filter_pf_cfg_cmd {
@@ -996,6 +1003,6 @@ enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
 enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
 					  struct hclge_desc *desc);
-void hclge_destroy_cmd_queue(struct hclge_hw *hw);
+void hclge_cmd_uninit(struct hclge_dev *hdev);
 int hclge_cmd_queue_init(struct hclge_dev *hdev);
 #endif
@@ -42,6 +42,8 @@ static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
 {
 	struct hclge_desc desc[4];
+	int entries_per_desc;
+	int index;
 	int ret;
 
 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
@@ -59,7 +61,9 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
 		return ret;
 	}
 
-	return (int)desc[offset / 6].data[offset % 6];
+	entries_per_desc = ARRAY_SIZE(desc[0].data);
+	index = offset % entries_per_desc;
+	return (int)desc[offset / entries_per_desc].data[index];
 }
 
 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -96,12 +100,13 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
 {
 	struct hclge_desc *desc_src;
 	struct hclge_desc *desc;
+	int entries_per_desc;
 	int bd_num, buf_len;
 	int ret, i;
 	int index;
 	int max;
 
-	ret = kstrtouint(cmd_buf, 10, &index);
+	ret = kstrtouint(cmd_buf, 0, &index);
 	index = (ret != 0) ? 0 : index;
 
 	bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
@@ -125,14 +130,18 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
 		return;
 	}
 
-	max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num;
+	entries_per_desc = ARRAY_SIZE(desc->data);
+	max = (bd_num * entries_per_desc) <= msg_num ?
+	      (bd_num * entries_per_desc) : msg_num;
 
 	desc = desc_src;
 	for (i = 0; i < max; i++) {
-		(((i / 6) > 0) && ((i % 6) == 0)) ? desc++ : desc;
+		((i > 0) && ((i % entries_per_desc) == 0)) ? desc++ : desc;
 		if (dfx_message->flag)
 			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
-				 dfx_message->message, desc->data[i % 6]);
+				 dfx_message->message,
+				 desc->data[i % entries_per_desc]);
 		dfx_message++;
 	}
@@ -244,92 +253,92 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
 {
 	int msg_num;
 
-	if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
+	if (strncmp(cmd_buf, "bios common", 11) == 0) {
 		msg_num = sizeof(hclge_dbg_bios_common_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
-					  &cmd_buf[21], msg_num,
-					  HCLGE_DBG_DFX_BIOS_OFFSET,
+					  &cmd_buf[sizeof("bios common")],
+					  msg_num, HCLGE_DBG_DFX_BIOS_OFFSET,
 					  HCLGE_OPC_DFX_BIOS_COMMON_REG);
-	} else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
+	} else if (strncmp(cmd_buf, "ssu", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_ssu_reg_0) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_0_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_0);
 		msg_num = sizeof(hclge_dbg_ssu_reg_1) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_1_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_1);
 		msg_num = sizeof(hclge_dbg_ssu_reg_2) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_2_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_2);
-	} else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
+	} else if (strncmp(cmd_buf, "igu egu", 7) == 0) {
 		msg_num = sizeof(hclge_dbg_igu_egu_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
-					  &cmd_buf[17], msg_num,
+					  &cmd_buf[sizeof("igu egu")], msg_num,
 					  HCLGE_DBG_DFX_IGU_OFFSET,
 					  HCLGE_OPC_DFX_IGU_EGU_REG);
-	} else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rpu", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rpu_reg_0) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rpu")], msg_num,
 					  HCLGE_DBG_DFX_RPU_0_OFFSET,
 					  HCLGE_OPC_DFX_RPU_REG_0);
 		msg_num = sizeof(hclge_dbg_rpu_reg_1) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rpu")], msg_num,
 					  HCLGE_DBG_DFX_RPU_1_OFFSET,
 					  HCLGE_OPC_DFX_RPU_REG_1);
-	} else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
+	} else if (strncmp(cmd_buf, "ncsi", 4) == 0) {
 		msg_num = sizeof(hclge_dbg_ncsi_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
-					  &cmd_buf[14], msg_num,
+					  &cmd_buf[sizeof("ncsi")], msg_num,
 					  HCLGE_DBG_DFX_NCSI_OFFSET,
 					  HCLGE_OPC_DFX_NCSI_REG);
-	} else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rtc", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rtc_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rtc")], msg_num,
 					  HCLGE_DBG_DFX_RTC_OFFSET,
 					  HCLGE_OPC_DFX_RTC_REG);
-	} else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
+	} else if (strncmp(cmd_buf, "ppp", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_ppp_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ppp")], msg_num,
 					  HCLGE_DBG_DFX_PPP_OFFSET,
 					  HCLGE_OPC_DFX_PPP_REG);
-	} else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rcb", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rcb_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rcb")], msg_num,
 					  HCLGE_DBG_DFX_RCB_OFFSET,
 					  HCLGE_OPC_DFX_RCB_REG);
-	} else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
+	} else if (strncmp(cmd_buf, "tqp", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_tqp_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("tqp")], msg_num,
 					  HCLGE_DBG_DFX_TQP_OFFSET,
 					  HCLGE_OPC_DFX_TQP_REG);
-	} else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
-		hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
+	} else if (strncmp(cmd_buf, "dcb", 3) == 0) {
+		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
 	} else {
 		dev_info(&hdev->pdev->dev, "unknown command\n");
 		return;
@@ -601,7 +610,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
 	int pri_id, ret;
 	u32 i;
 
-	ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
+	ret = kstrtouint(cmd_buf, 0, &queue_id);
 	queue_id = (ret != 0) ? 0 : queue_id;
 
 	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
@@ -772,7 +781,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
 
 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
-	for (i = 0; i < HCLGE_TC_NUM; i++)
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
 			 tx_buf_cmd->tx_pkt_buff[i]);
@@ -784,7 +793,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "\n");
 
 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
-	for (i = 0; i < HCLGE_TC_NUM; i++)
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
 			 rx_buf_cmd->buf_num[i]);
@@ -893,8 +902,8 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
 	mc_tbl_idx = 0;
 	for (i = 0; i < HCLGE_DBG_MAC_TBL_MAX; i++) {
 		/* Prevent long-term occupation of the command channel. */
-		if ((i % 100) == 0)
-			msleep(100);
+		if ((i % HCLGE_DBG_SCAN_STEP) == 0)
+			msleep(HCLGE_DBG_PAUSE_TIME);
 
 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_PPP_MAC_VLAN_IDX_RD,
 					   true);
@@ -919,6 +928,19 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
 		if (mac_rd_cmd->resp_code)
 			continue;
 
+		if (mac_rd_cmd->entry_type == HCLGE_DBG_MAC_MC_TBL) {
+			mc_mac_tbl[mc_tbl_idx].index = i;
+			memcpy(mc_mac_tbl[mc_tbl_idx].mac_add,
+			       mac_rd_cmd->mac_add, 6);
+			memcpy(mc_mac_tbl[mc_tbl_idx].mg_vf_mb,
+			       desc[1].data, 24);
+			memcpy(&mc_mac_tbl[mc_tbl_idx].mg_vf_mb[24],
+			       desc[2].data, 8);
+			mc_tbl_idx++;
+
+			continue;
+		}
+
 		memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
 		snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
 			 "|%04d |%02x:%02x:%02x:%02x:%02x:%02x |",
@@ -941,17 +963,6 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
 			 mac_rd_cmd->egress_port & HCLGE_DBG_MAC_TBL_E_PORT);
 
 		dev_info(&hdev->pdev->dev, "%s", printf_buf);
-
-		if (mac_rd_cmd->entry_type == HCLGE_DBG_MAC_MC_TBL) {
-			mc_mac_tbl[mc_tbl_idx].index = i;
-			memcpy(mc_mac_tbl[mc_tbl_idx].mac_add,
-			       mac_rd_cmd->mac_add, 6);
-			memcpy(mc_mac_tbl[mc_tbl_idx].mg_vf_mb,
-			       desc[1].data, 24);
-			memcpy(&mc_mac_tbl[mc_tbl_idx].mg_vf_mb[24],
-			       desc[2].data, 8);
-			mc_tbl_idx++;
-		}
 	}
 
 	if (mc_tbl_idx > 0) {
@@ -1028,7 +1039,7 @@ static void hclge_dbg_dump_port_vlan_table(struct hclge_dev *hdev)
 	u32 vlan_id;
 	int ret;
 
-	vlan_len = HCLGE_DBG_VLAN_ID_MAX / 8;
+	vlan_len = HCLGE_DBG_VLAN_ID_MAX / HCLGE_VLAN_BYTE_SIZE;
 	vlan_bitmap = kzalloc(vlan_len, GFP_KERNEL);
 	if (!vlan_bitmap) {
 		dev_err(&hdev->pdev->dev,
@@ -1038,15 +1049,15 @@ static void hclge_dbg_dump_port_vlan_table(struct hclge_dev *hdev)
 	for (vlan_id = 0; vlan_id < HCLGE_DBG_VLAN_ID_MAX; vlan_id++) {
 		/* Prevent long-term occupation of the command channel. */
-		if ((vlan_id % 100) == 0)
-			msleep(100);
+		if ((vlan_id % HCLGE_DBG_SCAN_STEP) == 0)
+			msleep(HCLGE_DBG_PAUSE_TIME);
 
 		hclge_cmd_setup_basic_desc(&desc,
 					   HCLGE_OPC_VLAN_FILTER_PF_CFG, true);
-		vlan_offset = vlan_id / 160;
-		vlan_byte = (vlan_id % 160) / 8;
-		vlan_byte_val = 1 << (vlan_id % 8);
+		vlan_offset = vlan_id / HCLGE_VLAN_ID_B;
+		vlan_byte = (vlan_id % HCLGE_VLAN_ID_B) / HCLGE_VLAN_BYTE_SIZE;
+		vlan_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
 
 		req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
 		req->vlan_offset = vlan_offset;
@@ -1086,7 +1097,7 @@ static void hclge_dbg_dump_vf_vlan_table(struct hclge_dev *hdev, char *cmd_buf)
 	u16 vf_id;
 	int ret;
 
-	ret = kstrtou16(&cmd_buf[17], 10, &vf_id);
+	ret = kstrtou16(cmd_buf, 0, &vf_id);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"vf id failed. vf id max: %d\n", hdev->num_alloc_vfs);
@@ -1103,8 +1114,8 @@ static void hclge_dbg_dump_vf_vlan_table(struct hclge_dev *hdev, char *cmd_buf)
 	for (vlan_id = 0; vlan_id < HCLGE_DBG_VLAN_ID_MAX; vlan_id++) {
 		/* Prevent long-term occupation of the command channel. */
-		if ((vlan_id % 100) == 0)
-			msleep(100);
+		if ((vlan_id % HCLGE_DBG_SCAN_STEP) == 0)
+			msleep(HCLGE_DBG_PAUSE_TIME);
 
 		hclge_cmd_setup_basic_desc(&desc[0],
 					   HCLGE_OPC_VLAN_FILTER_VF_CFG, true);
...@@ -1237,14 +1248,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, ...@@ -1237,14 +1248,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc); sel_x ? "x" : "y", loc);
/* tcam_data0 ~ tcam_data1 */
req = (u32 *)req1->tcam_data; req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++) for (i = 0; i < 2; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++); dev_info(&hdev->pdev->dev, "%08x\n", *req++);
/* tcam_data2 ~ tcam_data7 */
req = (u32 *)req2->tcam_data; req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++); dev_info(&hdev->pdev->dev, "%08x\n", *req++);
/* tcam_data8 ~ tcam_data12 */
req = (u32 *)req3->tcam_data; req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++) for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++); dev_info(&hdev->pdev->dev, "%08x\n", *req++);
...@@ -1272,7 +1286,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) ...@@ -1272,7 +1286,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
} else if (strncmp(cmd_buf, "dump tc", 7) == 0) { } else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
hclge_dbg_dump_tc(hdev); hclge_dbg_dump_tc(hdev);
} else if (strncmp(cmd_buf, "dump tm map", 11) == 0) { } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) {
hclge_dbg_dump_tm_map(hdev, cmd_buf); hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof("dump tm map")]);
} else if (strncmp(cmd_buf, "dump tm", 7) == 0) { } else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
hclge_dbg_dump_tm(hdev); hclge_dbg_dump_tm(hdev);
} else if (strncmp(cmd_buf, "dump checksum", 13) == 0) { } else if (strncmp(cmd_buf, "dump checksum", 13) == 0) {
...@@ -1288,11 +1302,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) ...@@ -1288,11 +1302,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
} else if (strncmp(cmd_buf, "dump port vlan tbl", 18) == 0) { } else if (strncmp(cmd_buf, "dump port vlan tbl", 18) == 0) {
hclge_dbg_dump_port_vlan_table(hdev); hclge_dbg_dump_port_vlan_table(hdev);
} else if (strncmp(cmd_buf, "dump vf vlan tbl", 16) == 0) { } else if (strncmp(cmd_buf, "dump vf vlan tbl", 16) == 0) {
hclge_dbg_dump_vf_vlan_table(hdev, cmd_buf); int len = sizeof("dump vf vlan tbl");
hclge_dbg_dump_vf_vlan_table(hdev, &cmd_buf[len]);
} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
hclge_dbg_dump_mng_table(hdev); hclge_dbg_dump_mng_table(hdev);
} else if (strncmp(cmd_buf, "dump reg", 8) == 0) { } else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
hclge_dbg_dump_reg_cmd(hdev, cmd_buf); hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof("dump reg")]);
} else { } else {
dev_info(&hdev->pdev->dev, "unknown command\n"); dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL; return -EINVAL;
......
...@@ -43,6 +43,9 @@ ...@@ -43,6 +43,9 @@
#define HCLGE_DBG_DFX_SSU_2_OFFSET 12 #define HCLGE_DBG_DFX_SSU_2_OFFSET 12
#define HCLGE_DBG_SCAN_STEP 100
#define HCLGE_DBG_PAUSE_TIME 50
#pragma pack(1) #pragma pack(1)
struct hclge_checksum_cmd { struct hclge_checksum_cmd {
......
...@@ -80,7 +80,7 @@ const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { ...@@ -80,7 +80,7 @@ const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" }, { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
...@@ -475,19 +475,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, ...@@ -475,19 +475,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
enum hclge_err_int_type int_type) enum hclge_err_int_type int_type)
{ {
struct device *dev = &hdev->pdev->dev; struct device *dev = &hdev->pdev->dev;
int num = 1; int desc_num = 1;
int ret; int ret;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true); hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
if (flag) { if (flag) {
desc[0].flag |= cpu_to_le16(flag); desc[0].flag |= cpu_to_le16(flag);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true); hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
num = 2; desc_num = 2;
} }
if (w_num) if (w_num)
desc[0].data[w_num] = cpu_to_le32(int_type); desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num); ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret) if (ret)
dev_err(dev, "query error cmd failed (%d)\n", ret); dev_err(dev, "query error cmd failed (%d)\n", ret);
...@@ -718,7 +718,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -718,7 +718,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
{ {
struct device *dev = &hdev->pdev->dev; struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2]; struct hclge_desc desc[2];
int num = 1; int desc_num = 1;
int ret; int ret;
/* configure PPU error interrupts */ /* configure PPU error interrupts */
...@@ -737,7 +737,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -737,7 +737,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK; desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK; desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK; desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
num = 2; desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false); hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en) if (en)
...@@ -755,7 +755,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, ...@@ -755,7 +755,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
return -EINVAL; return -EINVAL;
} }
ret = hclge_cmd_send(&hdev->hw, &desc[0], num); ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
return ret; return ret;
} }
...@@ -940,8 +940,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, ...@@ -940,8 +940,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[3]; desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK; status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
if (status) if (status)
hclge_log_error(dev, "IGU_INT_STS", hclge_log_error(dev, "IGU_INT_STS", &hclge_igu_int[0], status);
&hclge_igu_int[0], status);
/* log PPP(Programmable Packet Process) errors */ /* log PPP(Programmable Packet Process) errors */
desc_data = (__le32 *)&desc[4]; desc_data = (__le32 *)&desc[4];
...@@ -1167,8 +1166,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) ...@@ -1167,8 +1166,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
int ret; int ret;
/* read overflow error status */ /* read overflow error status */
ret = hclge_cmd_query_error(hdev, &desc[0], ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
HCLGE_ROCEE_PF_RAS_INT_CMD,
0, 0, 0); 0, 0, 0);
if (ret) { if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
...@@ -1207,10 +1205,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) ...@@ -1207,10 +1205,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
return 0; return 0;
} }
static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) static enum hnae3_reset_type
hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
{ {
enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET; enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev; struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2]; struct hclge_desc desc[2];
unsigned int status; unsigned int status;
...@@ -1223,17 +1221,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) ...@@ -1223,17 +1221,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (ret) { if (ret) {
dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
/* reset everything for now */ /* reset everything for now */
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); return HNAE3_GLOBAL_RESET;
return ret;
} }
status = le32_to_cpu(desc[0].data[0]); status = le32_to_cpu(desc[0].data[0]);
if (status & HCLGE_ROCEE_RERR_INT_MASK) if (status & HCLGE_ROCEE_RERR_INT_MASK) {
dev_warn(dev, "ROCEE RAS AXI rresp error\n"); dev_warn(dev, "ROCEE RAS AXI rresp error\n");
return HNAE3_FUNC_RESET;
}
if (status & HCLGE_ROCEE_BERR_INT_MASK) if (status & HCLGE_ROCEE_BERR_INT_MASK) {
dev_warn(dev, "ROCEE RAS AXI bresp error\n"); dev_warn(dev, "ROCEE RAS AXI bresp error\n");
return HNAE3_FUNC_RESET;
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) { if (status & HCLGE_ROCEE_ECC_INT_MASK) {
dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
...@@ -1245,9 +1246,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) ...@@ -1245,9 +1246,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (ret) { if (ret) {
dev_err(dev, "failed(%d) to process ovf error\n", ret); dev_err(dev, "failed(%d) to process ovf error\n", ret);
/* reset everything for now */ /* reset everything for now */
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); return HNAE3_GLOBAL_RESET;
return ret;
} }
reset_type = HNAE3_FUNC_RESET;
} }
/* clear error status */ /* clear error status */
...@@ -1256,12 +1257,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) ...@@ -1256,12 +1257,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (ret) { if (ret) {
dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
/* reset everything for now */ /* reset everything for now */
reset_type = HNAE3_GLOBAL_RESET; return HNAE3_GLOBAL_RESET;
} }
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type); return reset_type;
return ret;
} }
static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
...@@ -1293,13 +1292,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) ...@@ -1293,13 +1292,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{ {
enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv; struct hclge_dev *hdev = ae_dev->priv;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
hdev->pdev->revision < 0x21) hdev->pdev->revision < 0x21)
return HNAE3_NONE_RESET; return reset_type;
reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
if (reset_type != HNAE3_NONE_RESET)
HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
return hclge_log_and_clear_rocee_ras_error(hdev); return reset_type;
} }
static const struct hclge_hw_blk hw_blk[] = { static const struct hclge_hw_blk hw_blk[] = {
...@@ -1426,8 +1430,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev) ...@@ -1426,8 +1430,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) { if (ret) {
dev_err(dev, "query all mpf msix int cmd failed (%d)\n", dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret);
ret);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
goto msi_error; goto msi_error;
} }
...@@ -1446,9 +1449,8 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev) ...@@ -1446,9 +1449,8 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
status = le32_to_cpu(*(desc_data + 2)) & status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status) { if (status) {
dev_warn(dev, hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
"PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n", &hclge_ppu_mpf_abnormal_int_st2[0], status);
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
} }
...@@ -1458,8 +1460,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev) ...@@ -1458,8 +1460,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
if (ret) { if (ret) {
dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret);
ret);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
goto msi_error; goto msi_error;
} }
...@@ -1472,8 +1473,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev) ...@@ -1472,8 +1473,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) { if (ret) {
dev_err(dev, "query all pf msix int cmd failed (%d)\n", dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret);
ret);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
goto msi_error; goto msi_error;
} }
...@@ -1506,8 +1506,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev) ...@@ -1506,8 +1506,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
if (ret) { if (ret) {
dev_err(dev, "clear all pf msix int cmd failed (%d)\n", dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret);
ret);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
} }
......
...@@ -62,6 +62,9 @@ ...@@ -62,6 +62,9 @@
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ #define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
#define HCLGE_VLAN_ID_B 160
#define HCLGE_VLAN_BYTE_SIZE 8
#define HCLGE_TQP_RESET_TRY_TIMES 10 #define HCLGE_TQP_RESET_TRY_TIMES 10
#define HCLGE_PHY_PAGE_MDIX 0 #define HCLGE_PHY_PAGE_MDIX 0
...@@ -190,7 +193,7 @@ struct hclge_mac { ...@@ -190,7 +193,7 @@ struct hclge_mac {
u8 autoneg; u8 autoneg;
u8 duplex; u8 duplex;
u32 speed; u32 speed;
int link; /* store the link status of mac & phy (if phy exists)*/ int link; /* store the link status of mac & phy (if phy exists) */
struct phy_device *phydev; struct phy_device *phydev;
struct mii_bus *mdio_bus; struct mii_bus *mdio_bus;
phy_interface_t phy_if; phy_interface_t phy_if;
...@@ -406,6 +409,7 @@ enum HCLGE_FD_KEY_TYPE { ...@@ -406,6 +409,7 @@ enum HCLGE_FD_KEY_TYPE {
enum HCLGE_FD_STAGE { enum HCLGE_FD_STAGE {
HCLGE_FD_STAGE_1, HCLGE_FD_STAGE_1,
HCLGE_FD_STAGE_2, HCLGE_FD_STAGE_2,
MAX_STAGE_NUM,
}; };
/* OUTER_XXX indicates tuples in tunnel header of tunnel packet /* OUTER_XXX indicates tuples in tunnel header of tunnel packet
...@@ -460,7 +464,7 @@ enum HCLGE_FD_META_DATA { ...@@ -460,7 +464,7 @@ enum HCLGE_FD_META_DATA {
struct key_info { struct key_info {
u8 key_type; u8 key_type;
u8 key_length; u8 key_length; /* use bit as unit */
}; };
static const struct key_info meta_data_key_info[] = { static const struct key_info meta_data_key_info[] = {
...@@ -534,18 +538,23 @@ struct hclge_fd_key_cfg { ...@@ -534,18 +538,23 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg { struct hclge_fd_cfg {
u8 fd_mode; u8 fd_mode;
u16 max_key_length; u16 max_key_length; /* use bit as unit */
u32 proto_support; u32 proto_support;
u32 rule_num[2]; /* rule entry number */ u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[2]; /* rule hit counter number */ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[2]; struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
}; };
#define IPV4_INDEX 3
#define IPV6_SIZE 4
struct hclge_fd_rule_tuples { struct hclge_fd_rule_tuples {
u8 src_mac[6]; u8 src_mac[ETH_ALEN];
u8 dst_mac[6]; u8 dst_mac[ETH_ALEN];
u32 src_ip[4]; /* Be compatible for ip address of both ipv4 and ipv6.
u32 dst_ip[4]; * For ipv4 address, we store it in src/dst_ip[3].
*/
u32 src_ip[IPV6_SIZE];
u32 dst_ip[IPV6_SIZE];
u16 src_port; u16 src_port;
u16 dst_port; u16 dst_port;
u16 vlan_tag1; u16 vlan_tag1;
...@@ -581,7 +590,6 @@ struct hclge_fd_ad_data { ...@@ -581,7 +590,6 @@ struct hclge_fd_ad_data {
struct hclge_vport_mac_addr_cfg { struct hclge_vport_mac_addr_cfg {
struct list_head node; struct list_head node;
int vport_id;
int hd_tbl_status; int hd_tbl_status;
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
}; };
...@@ -739,8 +747,6 @@ struct hclge_dev { ...@@ -739,8 +747,6 @@ struct hclge_dev {
struct mutex umv_mutex; /* protect share_umv_size */ struct mutex umv_mutex; /* protect share_umv_size */
struct mutex vport_cfg_mutex; /* Protect stored vf table */ struct mutex vport_cfg_mutex; /* Protect stored vf table */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
}; };
/* VPort level vlan tag configuration for TX direction */ /* VPort level vlan tag configuration for TX direction */
...@@ -780,6 +786,17 @@ enum HCLGE_VPORT_STATE { ...@@ -780,6 +786,17 @@ enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_MAX HCLGE_VPORT_STATE_MAX
}; };
#pragma pack(1)
struct hclge_vf_vlan_cfg {
u8 mbx_cmd;
u8 subcode;
u8 is_kill;
u16 vlan;
u16 proto;
};
#pragma pack()
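This packed struct lets hclge_set_vf_vlan_cfg() overlay the mailbox payload directly instead of memcpy-ing field by field. A host-side sketch of the overlay, assuming a little-endian CPU as on the target hardware (values are arbitrary):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#pragma pack(1)
struct hclge_vf_vlan_cfg {	/* mirrors the driver layout above */
	uint8_t mbx_cmd;
	uint8_t subcode;
	uint8_t is_kill;
	uint16_t vlan;
	uint16_t proto;
};
#pragma pack()

int main(void)
{
	uint8_t msg[16] = { 0 };	/* stand-in for mbx_req->msg */
	struct hclge_vf_vlan_cfg *cfg = (struct hclge_vf_vlan_cfg *)msg;

	msg[0] = 1;	/* opcode */
	msg[1] = 2;	/* subcode */
	msg[2] = 1;	/* is_kill */
	memcpy(&msg[3], &(uint16_t){ 100 }, 2);	   /* vlan id */
	memcpy(&msg[5], &(uint16_t){ 0x8100 }, 2); /* proto */

	/* with pack(1), the fields land exactly on msg[0..6] */
	printf("kill=%u vlan=%u proto=0x%x\n",
	       cfg->is_kill, cfg->vlan, cfg->proto);
	return 0;
}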
struct hclge_vlan_info { struct hclge_vlan_info {
u16 vlan_proto; /* so far supports 802.1Q only */ u16 vlan_proto; /* so far supports 802.1Q only */
u16 qos; u16 qos;
...@@ -822,6 +839,9 @@ struct hclge_vport { ...@@ -822,6 +839,9 @@ struct hclge_vport {
unsigned long state; unsigned long state;
unsigned long last_active_jiffies; unsigned long last_active_jiffies;
int mps; /* Max packet size */ int mps; /* Max packet size */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
}; };
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
......
...@@ -308,34 +308,34 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, ...@@ -308,34 +308,34 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req) struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{ {
struct hclge_vf_vlan_cfg *msg_cmd;
int status = 0; int status = 0;
if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
struct hnae3_handle *handle = &vport->nic; struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto; u16 vlan, proto;
bool is_kill; bool is_kill;
is_kill = !!mbx_req->msg[2]; is_kill = !!msg_cmd->is_kill;
memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); vlan = msg_cmd->vlan;
memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill); vlan, is_kill);
} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic; struct hnae3_handle *handle = &vport->nic;
bool en = mbx_req->msg[2] ? true : false; bool en = msg_cmd->is_kill ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en); status = hclge_en_hw_strip_rxvtag(handle, en);
} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) { } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
struct hclge_vlan_info vlan_info; struct hclge_vlan_info *vlan_info;
u16 state; u16 *state;
memcpy(&state, &mbx_req->msg[2], sizeof(u16)); state = (u16 *)&mbx_req->msg[2];
memcpy(&vlan_info.vlan_tag, &mbx_req->msg[4], sizeof(u16)); vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
memcpy(&vlan_info.qos, &mbx_req->msg[6], sizeof(u16)); status = hclge_update_port_base_vlan_cfg(vport, *state,
memcpy(&vlan_info.vlan_proto, &mbx_req->msg[8], sizeof(u16)); vlan_info);
status = hclge_update_port_base_vlan_cfg(vport, state, } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
&vlan_info);
} else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
u8 state; u8 state;
state = vport->port_base_vlan_cfg.state; state = vport->port_base_vlan_cfg.state;
...@@ -373,7 +373,7 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport, ...@@ -373,7 +373,7 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
vf_tc_map |= BIT(i); vf_tc_map |= BIT(i);
ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map, ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
sizeof(u8)); sizeof(vf_tc_map));
return ret; return ret;
} }
...@@ -410,24 +410,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport, ...@@ -410,24 +410,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
HCLGE_TQPS_DEPTH_INFO_LEN); HCLGE_TQPS_DEPTH_INFO_LEN);
} }
static int hclge_get_vf_media_type(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
struct hclge_dev *hdev = vport->back;
u8 resp_data;
resp_data = hdev->hw.mac.media_type;
return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data,
sizeof(resp_data));
}
static int hclge_get_link_info(struct hclge_vport *vport, static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req) struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{ {
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u16 link_status; u16 link_status;
u8 msg_data[10]; u8 msg_data[8];
u16 media_type;
u8 dest_vfid; u8 dest_vfid;
u16 duplex; u16 duplex;
/* mac.link can only be 0 or 1 */ /* mac.link can only be 0 or 1 */
link_status = (u16)hdev->hw.mac.link; link_status = (u16)hdev->hw.mac.link;
duplex = hdev->hw.mac.duplex; duplex = hdev->hw.mac.duplex;
media_type = hdev->hw.mac.media_type;
memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16)); memcpy(&msg_data[6], &duplex, sizeof(u16));
memcpy(&msg_data[8], &media_type, sizeof(u16));
dest_vfid = mbx_req->mbx_src_vfid; dest_vfid = mbx_req->mbx_src_vfid;
/* send this requested info to VF */ /* send this requested info to VF */
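With the media type moved to its own HCLGE_MBX_GET_MEDIA_TYPE request, the link-info payload shrinks from 10 to 8 bytes. A standalone sketch of the remaining u16 + u32 + u16 layout (example values only):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint8_t msg_data[8];
	uint16_t link_status = 1;	/* mac.link is 0 or 1 */
	uint32_t speed = 25000;		/* example: 25G */
	uint16_t duplex = 1;

	/* layout used by hclge_get_link_info(): u16 + u32 + u16 = 8 bytes */
	memcpy(&msg_data[0], &link_status, sizeof(uint16_t));
	memcpy(&msg_data[2], &speed, sizeof(uint32_t));
	memcpy(&msg_data[6], &duplex, sizeof(uint16_t));

	printf("first byte %u, total %zu bytes\n", msg_data[0], sizeof(msg_data));
	return 0;
}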
...@@ -669,11 +677,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -669,11 +677,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret); ret);
break; break;
case HCLGE_MBX_GET_VF_FLR_STATUS: case HCLGE_MBX_GET_VF_FLR_STATUS:
mutex_lock(&hdev->vport_cfg_mutex);
hclge_rm_vport_all_mac_table(vport, true, hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_UC); HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true, hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_MC); HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(vport, true); hclge_rm_vport_all_vlan_table(vport, true);
mutex_unlock(&hdev->vport_cfg_mutex);
break; break;
case HCLGE_MBX_GET_RSS_KEY: case HCLGE_MBX_GET_RSS_KEY:
ret = hclge_get_rss_key(vport, req); ret = hclge_get_rss_key(vport, req);
...@@ -684,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -684,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_GET_LINK_MODE: case HCLGE_MBX_GET_LINK_MODE:
hclge_get_vf_link_mode(vport, req); hclge_get_vf_link_mode(vport, req);
break; break;
case HCLGE_MBX_GET_MEDIA_TYPE:
ret = hclge_get_vf_media_type(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF fail(%d) to media type for VF\n",
ret);
break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n", "un-supported mailbox message, code = %d\n",
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited. // Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/marvell_phy.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include "kcompat.h" #include "kcompat.h"
#include "hclge_cmd.h" #include "hclge_cmd.h"
...@@ -27,17 +28,6 @@ enum hclge_mdio_c22_op_seq { ...@@ -27,17 +28,6 @@ enum hclge_mdio_c22_op_seq {
#define HCLGE_MDIO_STA_B 0 #define HCLGE_MDIO_STA_B 0
struct hclge_mdio_cfg_cmd {
u8 ctrl_bit;
u8 phyid;
u8 phyad;
u8 rsvd;
__le16 reserve;
__le16 data_wr;
__le16 data_rd;
__le16 sta;
};
static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
u16 data) u16 data)
{ {
...@@ -119,6 +109,13 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) ...@@ -119,6 +109,13 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
return le16_to_cpu(mdio_cmd->data_rd); return le16_to_cpu(mdio_cmd->data_rd);
} }
static int hclge_phy_marvell_fixup(struct phy_device *phydev)
{
phydev->dev_flags |= MARVELL_PHY_M1510_HNS3_LEDS;
return 0;
}
int hclge_mac_mdio_config(struct hclge_dev *hdev) int hclge_mac_mdio_config(struct hclge_dev *hdev)
{ {
struct hclge_mac *mac = &hdev->hw.mac; struct hclge_mac *mac = &hdev->hw.mac;
...@@ -162,6 +159,15 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev) ...@@ -162,6 +159,15 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
mac->phydev = phydev; mac->phydev = phydev;
mac->mdio_bus = mdio_bus; mac->mdio_bus = mdio_bus;
/* register the PHY board fixup (for Marvell 88E1510) */
ret = phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510,
MARVELL_PHY_ID_MASK,
hclge_phy_marvell_fixup);
/* we can live without it, so just issue a warning */
if (ret)
dev_warn(&hdev->pdev->dev,
"Cannot register PHY board fixup\n");
return 0; return 0;
} }
...@@ -195,11 +201,29 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) ...@@ -195,11 +201,29 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
struct net_device *netdev = hdev->vport[0].nic.netdev; struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev; struct phy_device *phydev = hdev->hw.mac.phydev;
#ifdef HAS_LINK_MODE_OPS
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
#endif
int ret; int ret;
if (!phydev) if (!phydev)
return 0; return 0;
#ifdef HAS_LINK_MODE_OPS
linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
if (ret) {
netdev_err(netdev, "phy_connect_direct err.\n");
return ret;
}
linkmode_copy(mask, hdev->hw.mac.supported);
linkmode_and(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);
#else
phydev->supported &= ~SUPPORTED_FIBRE; phydev->supported &= ~SUPPORTED_FIBRE;
ret = phy_connect_direct(netdev, phydev, ret = phy_connect_direct(netdev, phydev,
...@@ -212,7 +236,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) ...@@ -212,7 +236,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
phydev->supported &= *hdev->hw.mac.supported; phydev->supported &= *hdev->hw.mac.supported;
phydev->advertising = phydev->supported; phydev->advertising = phydev->supported;
#endif
return 0; return 0;
} }
...@@ -225,6 +249,9 @@ void hclge_mac_disconnect_phy(struct hnae3_handle *handle) ...@@ -225,6 +249,9 @@ void hclge_mac_disconnect_phy(struct hnae3_handle *handle)
if (!phydev) if (!phydev)
return; return;
phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510,
MARVELL_PHY_ID_MASK);
phy_disconnect(phydev); phy_disconnect(phydev);
} }
......
...@@ -4,6 +4,17 @@ ...@@ -4,6 +4,17 @@
#ifndef __HCLGE_MDIO_H #ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H #define __HCLGE_MDIO_H
struct hclge_mdio_cfg_cmd {
u8 ctrl_bit;
u8 phyid;
u8 phyad;
u8 rsvd;
__le16 reserve;
__le16 data_wr;
__le16 data_rd;
__le16 sta;
};
int hclge_mac_mdio_config(struct hclge_dev *hdev); int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle); int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle); void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
......
...@@ -43,13 +43,17 @@ enum hclge_shaper_level { ...@@ -43,13 +43,17 @@ enum hclge_shaper_level {
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
u8 *ir_b, u8 *ir_u, u8 *ir_s) u8 *ir_b, u8 *ir_u, u8 *ir_s)
{ {
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */ 6 * 256, /* Priority level */
6 * 32, /* Priority group level */ 6 * 32, /* Priority group level */
6 * 8, /* Port level */ 6 * 8, /* Port level */
6 * 256 /* Qset level */ 6 * 256 /* Qset level */
}; };
u8 ir_u_calc = 0, ir_s_calc = 0; u8 ir_u_calc = 0;
u8 ir_s_calc = 0;
u32 ir_calc; u32 ir_calc;
u32 tick; u32 tick;
...@@ -66,7 +70,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -66,7 +70,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000 * ir_calc = ---------------- * 1000
* tick * 1 * tick * 1
*/ */
ir_calc = (1008000 + (tick >> 1) - 1) / tick; ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
if (ir_calc == ir) { if (ir_calc == ir) {
*ir_b = 126; *ir_b = 126;
...@@ -78,27 +82,28 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -78,27 +82,28 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Increasing the denominator to select ir_s value */ /* Increasing the denominator to select ir_s value */
while (ir_calc > ir) { while (ir_calc > ir) {
ir_s_calc++; ir_s_calc++;
ir_calc = 1008000 / (tick * (1 << ir_s_calc)); ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
} }
if (ir_calc == ir) if (ir_calc == ir)
*ir_b = 126; *ir_b = 126;
else else
*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000; *ir_b = (ir * tick * (1 << ir_s_calc) +
(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else { } else {
/* Increasing the numerator to select ir_u value */ /* Increasing the numerator to select ir_u value */
u32 numerator; u32 numerator;
while (ir_calc < ir) { while (ir_calc < ir) {
ir_u_calc++; ir_u_calc++;
numerator = 1008000 * (1 << ir_u_calc); numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick; ir_calc = (numerator + (tick >> 1)) / tick;
} }
if (ir_calc == ir) { if (ir_calc == ir) {
*ir_b = 126; *ir_b = 126;
} else { } else {
u32 denominator = (8000 * (1 << --ir_u_calc)); u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
*ir_b = (ir * tick + (denominator >> 1)) / denominator; *ir_b = (ir * tick + (denominator >> 1)) / denominator;
} }
} }
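A quick arithmetic check of the renamed constants: with ir_b = 126 and the Qset-level tick from tick_array[], the rounded base rate comes out as below (standalone sketch; constant values taken from the hunk above):

#include <stdio.h>

#define DIVISOR_CLK      (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)

int main(void)
{
	/* Qset-level tick from tick_array[] in the driver */
	unsigned int tick = 6 * 256;

	/* rate reached with ir_b = 126, ir_u = 0, ir_s = 0 (rounded) */
	unsigned int ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	printf("ir_calc = %u\n", ir_calc); /* prints 656 */
	return 0;
}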
...@@ -119,14 +124,13 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev, ...@@ -119,14 +124,13 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT)) opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
return -EINVAL; return -EINVAL;
for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], opcode, true); hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
} }
hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
if (ret) if (ret)
return ret; return ret;
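The rewritten loop sets HCLGE_CMD_FLAG_NEXT on every descriptor except the last, chaining them into one multi-descriptor command. A minimal sketch of that flag pattern (the 0x4 bit value is an assumption for illustration):

#include <stdio.h>

#define CMD_NUM   3
#define FLAG_NEXT 0x4 /* assumed bit; marks "more descriptors follow" */

int main(void)
{
	unsigned short flag[CMD_NUM] = { 0 };
	int i;

	/* all but the last descriptor carry the NEXT flag */
	for (i = 0; i < CMD_NUM - 1; i++)
		flag[i] |= FLAG_NEXT;

	for (i = 0; i < CMD_NUM; i++)
		printf("desc[%d] next=%d\n", i, !!(flag[i] & FLAG_NEXT));
	return 0;
}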
...@@ -219,8 +223,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) ...@@ -219,8 +223,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
trans_gap = pause_param->pause_trans_gap; trans_gap = pause_param->pause_trans_gap;
trans_time = le16_to_cpu(pause_param->pause_trans_time); trans_time = le16_to_cpu(pause_param->pause_trans_time);
return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
trans_time);
} }
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
...@@ -361,14 +364,27 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, ...@@ -361,14 +364,27 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
return hclge_cmd_send(&hdev->hw, &desc, 1); return hclge_cmd_send(&hdev->hw, &desc, 1);
} }
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
u8 bs_b, u8 bs_s)
{
u32 shapping_para = 0;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
return shapping_para;
}
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pg_id, enum hclge_shap_bucket bucket, u8 pg_id,
u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s) u32 shapping_para)
{ {
struct hclge_pg_shapping_cmd *shap_cfg_cmd; struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode; enum hclge_opcode_type opcode;
struct hclge_desc desc; struct hclge_desc desc;
u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
HCLGE_OPC_TM_PG_C_SHAPPING; HCLGE_OPC_TM_PG_C_SHAPPING;
...@@ -378,12 +394,6 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, ...@@ -378,12 +394,6 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_id = pg_id; shap_cfg_cmd->pg_id = pg_id;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para); shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1); return hclge_cmd_send(&hdev->hw, &desc, 1);
...@@ -406,11 +416,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) ...@@ -406,11 +416,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
hclge_tm_set_field(shapping_para, IR_B, ir_b); shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
hclge_tm_set_field(shapping_para, IR_U, ir_u); HCLGE_SHAPER_BS_U_DEF,
hclge_tm_set_field(shapping_para, IR_S, ir_s); HCLGE_SHAPER_BS_S_DEF);
hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);
shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
...@@ -419,13 +427,11 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) ...@@ -419,13 +427,11 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pri_id, enum hclge_shap_bucket bucket, u8 pri_id,
u8 ir_b, u8 ir_u, u8 ir_s, u32 shapping_para)
u8 bs_b, u8 bs_s)
{ {
struct hclge_pri_shapping_cmd *shap_cfg_cmd; struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode; enum hclge_opcode_type opcode;
struct hclge_desc desc; struct hclge_desc desc;
u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING : opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
HCLGE_OPC_TM_PRI_C_SHAPPING; HCLGE_OPC_TM_PRI_C_SHAPPING;
...@@ -436,12 +442,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, ...@@ -436,12 +442,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id; shap_cfg_cmd->pri_id = pri_id;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para); shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1); return hclge_cmd_send(&hdev->hw, &desc, 1);
...@@ -531,6 +531,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -531,6 +531,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
max_rss_size = min_t(u16, hdev->rss_size_max, max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc); vport->alloc_tqps / kinfo->num_tc);
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) { kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
...@@ -538,6 +539,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -538,6 +539,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size; kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size || } else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
/* Set to the maximum specification value (max_rss_size). */
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, max_rss_size); kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size; kinfo->rss_size = max_rss_size;
...@@ -604,12 +606,13 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) ...@@ -604,12 +606,13 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
static void hclge_tm_pg_info_init(struct hclge_dev *hdev) static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{ {
#define BW_PERCENT 100
u8 i; u8 i;
for (i = 0; i < hdev->tm_info.num_pg; i++) { for (i = 0; i < hdev->tm_info.num_pg; i++) {
int k; int k;
hdev->tm_info.pg_dwrr[i] = i ? 0 : 100; hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
hdev->tm_info.pg_info[i].pg_id = i; hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
...@@ -621,7 +624,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev) ...@@ -621,7 +624,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++) for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = 100; hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
} }
} }
...@@ -682,6 +685,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) ...@@ -682,6 +685,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{ {
u8 ir_u, ir_b, ir_s; u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret; int ret;
u32 i; u32 i;
...@@ -699,18 +703,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) ...@@ -699,18 +703,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
if (ret) if (ret)
return ret; return ret;
shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev, ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_C_BUCKET, i, HCLGE_TM_SHAP_C_BUCKET, i,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF, shaper_para);
HCLGE_SHAPER_BS_S_DEF);
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pg_shapping_cfg(hdev, shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
HCLGE_TM_SHAP_P_BUCKET, i,
ir_b, ir_u, ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_P_BUCKET, i,
shaper_para);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -730,8 +737,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) ...@@ -730,8 +737,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
/* pg to prio */ /* pg to prio */
for (i = 0; i < hdev->tm_info.num_pg; i++) { for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Cfg dwrr */ /* Cfg dwrr */
ret = hclge_tm_pg_weight_cfg(hdev, i, ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
hdev->tm_info.pg_dwrr[i]);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -811,6 +817,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) ...@@ -811,6 +817,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{ {
u8 ir_u, ir_b, ir_s; u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret; int ret;
u32 i; u32 i;
...@@ -822,17 +829,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) ...@@ -822,17 +829,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pri_shapping_cfg( shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
hdev, HCLGE_TM_SHAP_C_BUCKET, i, HCLGE_SHAPER_BS_U_DEF,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
shaper_para);
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pri_shapping_cfg( shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
hdev, HCLGE_TM_SHAP_P_BUCKET, i, HCLGE_SHAPER_BS_U_DEF,
ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
shaper_para);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -844,6 +853,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) ...@@ -844,6 +853,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{ {
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u8 ir_u, ir_b, ir_s; u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret; int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
...@@ -851,18 +861,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) ...@@ -851,18 +861,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
vport->vport_id, HCLGE_SHAPER_BS_U_DEF,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
vport->vport_id, shaper_para);
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
vport->vport_id,
ir_b, ir_u, ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
vport->vport_id, shaper_para);
if (ret) if (ret)
return ret; return ret;
...@@ -1333,8 +1344,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) ...@@ -1333,8 +1344,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
ret = hclge_pfc_setup_hw(hdev); ret = hclge_pfc_setup_hw(hdev);
if (init && ret == -EOPNOTSUPP) if (init && ret == -EOPNOTSUPP)
dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
else else if (ret) {
dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
ret);
return ret; return ret;
}
return hclge_tm_bp_setup(hdev); return hclge_tm_bp_setup(hdev);
} }
...@@ -1357,7 +1371,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) ...@@ -1357,7 +1371,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{ {
u8 i, bit_map = 0; u8 bit_map = 0;
u8 i;
hdev->tm_info.num_tc = num_tc; hdev->tm_info.num_tc = num_tc;
......
...@@ -81,7 +81,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) ...@@ -81,7 +81,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
if (ring->flag == HCLGEVF_TYPE_CSQ) { if (ring->flag == HCLGEVF_TYPE_CSQ) {
reg_val = (u32)ring->desc_dma_addr; reg_val = (u32)ring->desc_dma_addr;
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); reg_val = (u32)(ring->desc_dma_addr >>
HCLGEVF_RING_BASEADDR_SHIFT);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
...@@ -93,7 +94,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) ...@@ -93,7 +94,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
} else { } else {
reg_val = (u32)ring->desc_dma_addr; reg_val = (u32)ring->desc_dma_addr;
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); reg_val = (u32)(ring->desc_dma_addr >>
HCLGEVF_RING_BASEADDR_SHIFT);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
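Both BASEADDR_H writes now extract the upper half of the 64-bit descriptor DMA address with one named shift instead of the (>> 31) >> 1 idiom. A standalone sketch (the address is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

#define HCLGEVF_RING_BASEADDR_SHIFT 32

int main(void)
{
	uint64_t desc_dma_addr = 0x0000001234abcd00ULL; /* example DMA address */

	/* low and high 32-bit halves programmed into BASEADDR_L/H */
	uint32_t lo = (uint32_t)desc_dma_addr;
	uint32_t hi = (uint32_t)(desc_dma_addr >> HCLGEVF_RING_BASEADDR_SHIFT);

	printf("L=0x%08x H=0x%08x\n", (unsigned)lo, (unsigned)hi);
	return 0; /* prints L=0x34abcd00 H=0x00000012 */
}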
...@@ -363,8 +365,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) ...@@ -363,8 +365,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
return 0; return 0;
} }
static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
{
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
}
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{ {
spin_lock_bh(&hdev->hw.cmq.csq.lock);
spin_lock_bh(&hdev->hw.cmq.crq.lock);
clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
hclgevf_cmd_uninit_regs(&hdev->hw);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
} }
...@@ -242,6 +242,8 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { ...@@ -242,6 +242,8 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3 #define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100 #define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
#define HCLGEVF_RING_BASEADDR_SHIFT 32
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{ {
writel(value, base + reg); writel(value, base + reg);
......
...@@ -47,8 +47,7 @@ static const u8 hclgevf_hash_key[] = { ...@@ -47,8 +47,7 @@ static const u8 hclgevf_hash_key[] = {
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
static inline struct hclgevf_dev *hclgevf_ae_get_hdev( static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
struct hnae3_handle *handle)
{ {
if (!handle->client) if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic); return container_of(handle, struct hclgevf_dev, nic);
...@@ -179,10 +178,8 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) ...@@ -179,10 +178,8 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
u8 *data) u8 *data)
{ {
u8 *p = (char *)data;
if (strset == ETH_SS_STATS) if (strset == ETH_SS_STATS)
p = hclgevf_tqps_get_strings(handle, p); (void)hclgevf_tqps_get_strings(handle, data);
} }
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
...@@ -196,7 +193,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) ...@@ -196,7 +193,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
int status; int status;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0, status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
true, &resp_msg, sizeof(u8)); true, &resp_msg, sizeof(resp_msg));
if (status) { if (status) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF request to get TC info from PF failed %d", "VF request to get TC info from PF failed %d",
...@@ -285,13 +282,33 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) ...@@ -285,13 +282,33 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
2, true, resp_data, 2); sizeof(msg_data), true, resp_data,
sizeof(resp_data));
if (!ret) if (!ret)
qid_in_pf = *(u16 *)resp_data; qid_in_pf = *(u16 *)resp_data;
return qid_in_pf; return qid_in_pf;
} }
static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
u8 resp_msg;
int ret;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
true, &resp_msg, sizeof(resp_msg));
if (ret) {
dev_err(&hdev->pdev->dev,
"VF request to get the pf port media type failed %d",
ret);
return ret;
}
hdev->hw.mac.media_type = resp_msg;
return 0;
}
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{ {
struct hclgevf_tqp *tqp; struct hclgevf_tqp *tqp;
...@@ -362,7 +379,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev) ...@@ -362,7 +379,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
u8 resp_msg; u8 resp_msg;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL, status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
0, false, &resp_msg, sizeof(u8)); 0, false, &resp_msg, sizeof(resp_msg));
if (status) if (status)
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF failed to fetch link status(%d) from PF", status); "VF failed to fetch link status(%d) from PF", status);
...@@ -397,11 +414,13 @@ void hclgevf_update_link_mode(struct hclgevf_dev *hdev) ...@@ -397,11 +414,13 @@ void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
u8 resp_msg; u8 resp_msg;
send_msg = HCLGEVF_ADVERTISING; send_msg = HCLGEVF_ADVERTISING;
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
sizeof(u8), false, &resp_msg, sizeof(u8)); &send_msg, sizeof(send_msg), false,
&resp_msg, sizeof(resp_msg));
send_msg = HCLGEVF_SUPPORTED; send_msg = HCLGEVF_SUPPORTED;
hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
sizeof(u8), false, &resp_msg, sizeof(u8)); &send_msg, sizeof(send_msg), false,
&resp_msg, sizeof(resp_msg));
} }
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
...@@ -489,13 +508,14 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, ...@@ -489,13 +508,14 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
{ {
struct hclgevf_rss_config_cmd *req; struct hclgevf_rss_config_cmd *req;
struct hclgevf_desc desc; struct hclgevf_desc desc;
int key_offset; int key_offset = 0;
int key_counts;
int key_size; int key_size;
int ret; int ret;
key_counts = HCLGEVF_RSS_KEY_SIZE;
req = (struct hclgevf_rss_config_cmd *)desc.data; req = (struct hclgevf_rss_config_cmd *)desc.data;
while (key_counts) {
for (key_offset = 0; key_offset < 3; key_offset++) {
hclgevf_cmd_setup_basic_desc(&desc, hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_RSS_GENERIC_CONFIG, HCLGEVF_OPC_RSS_GENERIC_CONFIG,
false); false);
...@@ -504,15 +524,15 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, ...@@ -504,15 +524,15 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
req->hash_config |= req->hash_config |=
(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
if (key_offset == 2) if (key_counts >= HCLGEVF_RSS_HASH_KEY_NUM)
key_size =
HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
else
key_size = HCLGEVF_RSS_HASH_KEY_NUM; key_size = HCLGEVF_RSS_HASH_KEY_NUM;
else
key_size = key_counts;
memcpy(req->hash_key, memcpy(req->hash_key,
key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
key_counts -= key_size;
key_offset++;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
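The reworked key loop walks the RSS key in descriptor-sized chunks rather than hard-coding three iterations. A standalone sketch of the chunking, assuming the driver's 40-byte key and 16 key bytes per descriptor (16 + 16 + 8):

#include <stdio.h>

#define HCLGEVF_RSS_KEY_SIZE     40 /* assumed Toeplitz key length */
#define HCLGEVF_RSS_HASH_KEY_NUM 16 /* assumed key bytes per descriptor */

int main(void)
{
	int key_counts = HCLGEVF_RSS_KEY_SIZE;
	int key_offset = 0;

	/* same walk as the rewritten loop above */
	while (key_counts) {
		int key_size = key_counts >= HCLGEVF_RSS_HASH_KEY_NUM ?
			       HCLGEVF_RSS_HASH_KEY_NUM : key_counts;

		printf("desc %d: copy %d bytes from key[%d]\n",
		       key_offset, key_size,
		       key_offset * HCLGEVF_RSS_HASH_KEY_NUM);

		key_counts -= key_size;
		key_offset++;
	}
	return 0;
}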
...@@ -1137,7 +1157,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, ...@@ -1137,7 +1157,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
HCLGE_MBX_MAC_VLAN_UC_MODIFY; HCLGE_MBX_MAC_VLAN_UC_MODIFY;
status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST, status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
subcode, msg_data, ETH_ALEN * 2, subcode, msg_data, sizeof(msg_data),
true, NULL, 0); true, NULL, 0);
if (!status) if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
...@@ -1193,7 +1213,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, ...@@ -1193,7 +1213,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN]; u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
if (vlan_id > 4095) if (vlan_id > MAX_VLAN_ID)
return -EINVAL; return -EINVAL;
if (proto != htons(ETH_P_8021Q)) if (proto != htons(ETH_P_8021Q))
...@@ -1227,7 +1247,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -1227,7 +1247,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
return 0; return 0;
memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); memcpy(msg_data, &queue_id, sizeof(queue_id));
/* disable vf queue before send queue reset msg to PF */ /* disable vf queue before send queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false); ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
...@@ -1235,7 +1255,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -1235,7 +1255,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret; return ret;
return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
2, true, NULL, 0); sizeof(msg_data), true, NULL, 0);
} }
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
...@@ -1371,7 +1391,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) ...@@ -1371,7 +1391,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
switch (hdev->reset_type) { switch (hdev->reset_type) {
case HNAE3_VF_FUNC_RESET: case HNAE3_VF_FUNC_RESET:
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8)); 0, true, NULL, 0);
break; break;
case HNAE3_FLR_RESET: case HNAE3_FLR_RESET:
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
...@@ -1493,7 +1513,8 @@ static void hclgevf_reset_event(struct pci_dev *pdev, ...@@ -1493,7 +1513,8 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv; struct hclgevf_dev *hdev = ae_dev->priv;
if (time_before(jiffies, (hdev->last_reset_time + 5 * HZ))) if (time_before(jiffies, (hdev->last_reset_time +
HCLGEVF_RESET_TASK_INTERVAL * HZ)))
return; return;
dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
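time_before() is used for the rate limit above rather than a plain comparison because the jiffies counter wraps; the macro compares via signed subtraction, so the test stays correct across the wrap. The usage pattern in sketch form (names are illustrative):

	unsigned long window_end = hdev->last_reset_time +
				   HCLGEVF_RESET_TASK_INTERVAL * HZ;

	/* wrap-safe: still true inside the window even if jiffies
	 * overflowed since last_reset_time was recorded
	 */
	if (time_before(jiffies, window_end))
		return;		/* too soon, drop the reset request */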
...@@ -1603,8 +1624,10 @@ static void hclgevf_service_timer(struct timer_list *t) ...@@ -1603,8 +1624,10 @@ static void hclgevf_service_timer(struct timer_list *t)
{ {
struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer); struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + 5 * HZ); mod_timer(&hdev->service_timer, jiffies +
HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
hdev->stats_timer++;
hclgevf_task_schedule(hdev); hclgevf_task_schedule(hdev);
} }
...@@ -1623,8 +1646,8 @@ static void hclgevf_reset_service_task(struct work_struct *work) ...@@ -1623,8 +1646,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
&hdev->reset_state)) { &hdev->reset_state)) {
/* PF has intimated that it is about to reset the hardware. /* PF has intimated that it is about to reset the hardware.
* We now have to poll & check if hardware has actually * We now have to poll & check if hardware has actually
* completed the reset sequence. On hardware reset * completed the reset sequence. On hardware reset completion,
* completion, VF needs to reset the client and ae device. * VF needs to reset the client and ae device.
*/ */
hdev->reset_attempts = 0; hdev->reset_attempts = 0;
...@@ -1640,7 +1663,7 @@ static void hclgevf_reset_service_task(struct work_struct *work) ...@@ -1640,7 +1663,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) { &hdev->reset_state)) {
/* we could be here when either of the below happens: /* we could be here when either of the below happens:
* 1. reset was initiated due to watchdog timeout due to * 1. reset was initiated due to watchdog timeout caused by
* a. IMP was earlier reset and our TX got choked down, * a. IMP was earlier reset and our TX got choked down,
* which resulted in the watchdog reacting and inducing VF * which resulted in the watchdog reacting and inducing VF
* reset. This also means our cmdq would be unreliable. * reset. This also means our cmdq would be unreliable.
...@@ -1702,7 +1725,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t) ...@@ -1702,7 +1725,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t)
struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
schedule_work(&hdev->keep_alive_task); schedule_work(&hdev->keep_alive_task);
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); mod_timer(&hdev->keep_alive_timer, jiffies +
HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
} }
static void hclgevf_keep_alive_task(struct work_struct *work) static void hclgevf_keep_alive_task(struct work_struct *work)
...@@ -1713,11 +1737,11 @@ static void hclgevf_keep_alive_task(struct work_struct *work) ...@@ -1713,11 +1737,11 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
hdev = container_of(work, struct hclgevf_dev, keep_alive_task); hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return; return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
0, false, &respmsg, sizeof(u8)); 0, false, &respmsg, sizeof(respmsg));
if (ret) if (ret)
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF sends keep alive cmd failed(=%d)\n", ret); "VF sends keep alive cmd failed(=%d)\n", ret);
...@@ -1725,9 +1749,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work) ...@@ -1725,9 +1749,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
static void hclgevf_service_task(struct work_struct *work) static void hclgevf_service_task(struct work_struct *work)
{ {
struct hnae3_handle *handle;
struct hclgevf_dev *hdev; struct hclgevf_dev *hdev;
hdev = container_of(work, struct hclgevf_dev, service_task); hdev = container_of(work, struct hclgevf_dev, service_task);
handle = &hdev->nic;
if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
hclgevf_tqps_update_stats(handle);
hdev->stats_timer = 0;
}
/* request the link status from the PF. PF would be able to tell VF /* request the link status from the PF. PF would be able to tell VF
* about such updates in the future so we might remove this later * about such updates in the future so we might remove this later
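Taken together with the stats_timer++ added to the service timer above, the new counter gives TQP statistics a fixed cadence: the timer ticks every HCLGEVF_GENERAL_TASK_INTERVAL (5) seconds, and the worker refreshes stats once the count reaches HCLGEVF_STATS_TIMER_INTERVAL (36), i.e. roughly every 36 * 5 = 180 seconds. The tick/threshold split keeps the timer callback cheap:

	/* timer callback: only counts ticks */
	hdev->stats_timer++;

	/* worker: does the expensive refresh when enough ticks passed */
	if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
		hclgevf_tqps_update_stats(handle);
		hdev->stats_timer = 0;
	}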
...@@ -1831,6 +1862,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) ...@@ -1831,6 +1862,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
if (ret) if (ret)
return ret; return ret;
ret = hclgevf_get_pf_media_type(hdev);
if (ret)
return ret;
/* get tc configuration from PF */ /* get tc configuration from PF */
return hclgevf_get_tc_info(hdev); return hclgevf_get_tc_info(hdev);
} }
...@@ -1932,7 +1967,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ...@@ -1932,7 +1967,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
return ret; return ret;
} }
/* Initialize RSS indirect table for each vport */ /* Initialize RSS indirect table */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
...@@ -1945,9 +1980,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ...@@ -1945,9 +1980,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{ {
/* other vlan config(like, VLAN TX/RX offload) would also be added
* here later
*/
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false); false);
} }
...@@ -1969,7 +2001,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) ...@@ -1969,7 +2001,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle); hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev); hclgevf_request_link_info(hdev);
...@@ -1991,7 +2022,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) ...@@ -1991,7 +2022,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
for (i = 0; i < handle->kinfo.num_tqps; i++) for (i = 0; i < handle->kinfo.num_tqps; i++)
hclgevf_reset_tqp(handle, i); hclgevf_reset_tqp(handle, i);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle); hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0); hclgevf_update_link_status(hdev, 0);
} }
...@@ -2010,7 +2040,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle) ...@@ -2010,7 +2040,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); mod_timer(&hdev->keep_alive_timer, jiffies +
HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
return hclgevf_set_alive(handle, true); return hclgevf_set_alive(handle, true);
} }
......
...@@ -12,10 +12,15 @@ ...@@ -12,10 +12,15 @@
#define HCLGEVF_MOD_VERSION "1.0" #define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf" #define HCLGEVF_DRIVER_NAME "hclgevf"
#define MAX_VLAN_ID 4095
#define HCLGEVF_MISC_VECTOR_NUM 0 #define HCLGEVF_MISC_VECTOR_NUM 0
#define HCLGEVF_INVALID_VPORT 0xffff #define HCLGEVF_INVALID_VPORT 0xffff
#define HCLGEVF_RESET_TASK_INTERVAL 5
#define HCLGEVF_GENERAL_TASK_INTERVAL 5
#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2
/* This number actually depends upon the total number of VFs /* This number actually depends upon the total number of VFs
* created by the physical function, but the maximum number of * created by the physical function, but the maximum number of
* possible vectors per VF is {VFn(1-32), VECTn(32 + 1)}. * possible vectors per VF is {VFn(1-32), VECTn(32 + 1)}.
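The interval constants added above are in seconds; each use site converts to jiffies by multiplying with HZ, which keeps the unit visible at a glance and removes the magic 5 and 2 from the timer code:

	/* re-arm 5 seconds (HCLGEVF_GENERAL_TASK_INTERVAL) from now */
	mod_timer(&hdev->service_timer,
		  jiffies + HCLGEVF_GENERAL_TASK_INTERVAL * HZ);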
...@@ -62,6 +67,8 @@ ...@@ -62,6 +67,8 @@
#define HCLGEVF_S_IP_BIT BIT(3) #define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4) #define HCLGEVF_V_TAG_BIT BIT(4)
#define HCLGEVF_STATS_TIMER_INTERVAL (36)
enum hclgevf_evt_cause { enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST, HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX, HCLGEVF_VECTOR0_EVENT_MBX,
...@@ -220,6 +227,7 @@ struct hclgevf_dev { ...@@ -220,6 +227,7 @@ struct hclgevf_dev {
struct hnae3_client *nic_client; struct hnae3_client *nic_client;
struct hnae3_client *roce_client; struct hnae3_client *roce_client;
u32 flag; u32 flag;
u32 stats_timer;
}; };
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
......
...@@ -273,7 +273,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) ...@@ -273,7 +273,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
link_status = le16_to_cpu(msg_q[1]); link_status = le16_to_cpu(msg_q[1]);
memcpy(&speed, &msg_q[2], sizeof(speed)); memcpy(&speed, &msg_q[2], sizeof(speed));
duplex = (u8)le16_to_cpu(msg_q[4]); duplex = (u8)le16_to_cpu(msg_q[4]);
hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
/* update upper layer with new link link status */ /* update upper layer with new link link status */
hclgevf_update_link_status(hdev, link_status); hclgevf_update_link_status(hdev, link_status);
......
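With the media type dropped from the async message above, the PF->VF link-change payload reduces to status, speed and duplex. The layout implied by the indices in that hunk (offsets are u16 words; a descriptive reconstruction, not a definition from this patch):

	/* msg_q[0]     opcode (HCLGE_MBX_LINK_STAT_CHANGE)
	 * msg_q[1]     link status
	 * msg_q[2..3]  speed, copied out as a u32 with memcpy()
	 * msg_q[4]     duplex
	 * msg_q[5]     previously media type; now fetched on demand
	 *              via HCLGE_MBX_GET_MEDIA_TYPE instead
	 */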
...@@ -378,5 +378,19 @@ static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) ...@@ -378,5 +378,19 @@ static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
#else #else
#endif #endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 4))
#include <linux/bitmap.h>
static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
{
__set_bit(nr, addr);
}
#else
#define HAS_LINK_MODE_OPS
#endif
#endif #endif
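The compat block above backfills linkmode_set_bit() on kernels older than 4.20.4, where the in-kernel linux/linkmode.h helpers may not exist; newer kernels define HAS_LINK_MODE_OPS and use the native implementation. A hypothetical call site, assuming the standard ethtool link-mode bitmap macros:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };

	/* works on both sides of the version check: the __set_bit()
	 * shim on old kernels, linux/linkmode.h on new ones
	 */
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);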