Commit 4645e3f4 authored by Yang Yingliang, committed by Xie XiuQi

driver: hns3: update hns3 driver from driver team

driver inclusion
category: feature

-----------------------------------------

Based on add763cbef9424c6ea624dce6d6d2d51048cf9da
("net: hns3: Reduce resources use in kdump kernel")
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent bea163a4
@@ -44,6 +44,7 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_LINK_STAT_MODE,	/* (PF -> VF) link mode has changed */
 	HCLGE_MBX_GET_LINK_MODE,	/* (VF -> PF) get the link mode of pf */
 	HLCGE_MBX_PUSH_VLAN_INFO,	/* (PF -> VF) push port base vlan */
+	HCLGE_MBX_GET_MEDIA_TYPE,	/* (VF -> PF) get media type */
 	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
 };
@@ -68,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
 };
 #define HCLGE_MBX_MAX_MSG_SIZE		16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE	16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8
 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM	3
 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM	3
@@ -86,6 +87,7 @@ struct hclge_mbx_vf_to_pf_cmd {
 	u8 rsv1[2];
 	u8 msg_len;
 	u8 rsv2[3];
+	/* msg[0] means opcode and msg[1] means sub opcode, other is msg data */
 	u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
 };
@@ -94,6 +96,7 @@ struct hclge_mbx_pf_to_vf_cmd {
 	u8 rsv[3];
 	u8 msg_len;
 	u8 rsv1[3];
+	/* msg[0] means OPCODE, other is msg data */
 	u16 msg[8];
 };
......
@@ -32,6 +32,9 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
 void hnae3_set_client_init_flag(struct hnae3_client *client,
 				struct hnae3_ae_dev *ae_dev, int inited)
 {
+	if (!client || !ae_dev)
+		return;
+
 	switch (client->type) {
 	case HNAE3_CLIENT_KNIC:
 		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
@@ -109,6 +112,9 @@ int hnae3_register_client(struct hnae3_client *client)
 	struct hnae3_ae_dev *ae_dev;
 	int ret = 0;

+	if (!client)
+		return -ENODEV;
+
 	mutex_lock(&hnae3_common_lock);
 	/* one system should only have one client for every type */
 	list_for_each_entry(client_tmp, &hnae3_client_list, node) {
@@ -141,6 +147,9 @@ void hnae3_unregister_client(struct hnae3_client *client)
 {
 	struct hnae3_ae_dev *ae_dev;

+	if (!client)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* un-initialize the client on every matched port */
 	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
@@ -163,6 +172,9 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 	struct hnae3_client *client;
 	int ret = 0;

+	if (!ae_algo)
+		return;
+
 	mutex_lock(&hnae3_common_lock);

 	list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);
@@ -173,8 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 		if (!id)
 			continue;

-		/* ae_dev init should set flag */
+		if (!ae_algo->ops) {
+			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+			continue;
+		}
 		ae_dev->ops = ae_algo->ops;
+
 		ret = ae_algo->ops->init_ae_dev(ae_dev);
 		if (ret) {
 			dev_err(&ae_dev->pdev->dev,
@@ -182,6 +198,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 			continue;
 		}

+		/* ae_dev init should set flag */
 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);

 		/* check the client list for the match with this ae_dev type and
@@ -209,6 +226,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
 	struct hnae3_ae_dev *ae_dev;
 	struct hnae3_client *client;

+	if (!ae_algo)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* Check if there are matched ae_dev */
 	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
@@ -245,6 +265,9 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 	struct hnae3_client *client;
 	int ret = 0;

+	if (!ae_dev)
+		return -ENODEV;
+
 	mutex_lock(&hnae3_common_lock);

 	list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
@@ -255,15 +278,13 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 		if (!id)
 			continue;

-		ae_dev->ops = ae_algo->ops;
-
-		if (!ae_dev->ops) {
-			dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+		if (!ae_algo->ops) {
+			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
 			ret = -EOPNOTSUPP;
 			goto out_err;
 		}
+		ae_dev->ops = ae_algo->ops;

-		/* ae_dev init should set flag */
 		ret = ae_dev->ops->init_ae_dev(ae_dev);
 		if (ret) {
 			dev_err(&ae_dev->pdev->dev,
@@ -271,6 +292,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 			goto out_err;
 		}

+		/* ae_dev init should set flag */
 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 		break;
 	}
@@ -307,6 +329,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
 	struct hnae3_ae_algo *ae_algo;
 	struct hnae3_client *client;

+	if (!ae_dev)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* Check if there are matched ae_algo */
 	list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
......
@@ -4,8 +4,7 @@
 #include "hnae3.h"
 #include "hns3_enet.h"

-static
-int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
 	return -EOPNOTSUPP;
 }

-static
-int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
 	return -EOPNOTSUPP;
 }

-static
-int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 	return -EOPNOTSUPP;
 }

-static
-int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
......
@@ -146,8 +146,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
 		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

 		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
-				  tqp_vectors->name,
-				  tqp_vectors);
+				  tqp_vectors->name, tqp_vectors);
 		if (ret) {
 			netdev_err(priv->netdev, "request irq(%d) fail\n",
 				   tqp_vectors->vector_irq);
@@ -290,8 +289,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 	ret = netif_set_real_num_tx_queues(netdev, queue_size);
 	if (ret) {
 		netdev_err(netdev,
-			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
-			   ret);
+			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
 		return ret;
 	}
@@ -347,7 +345,7 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	/* get irq resource for all vectors */
 	ret = hns3_nic_init_irq(priv);
 	if (ret) {
-		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
+		netdev_err(netdev, "init irq failed! ret=%d\n", ret);
 		return ret;
 	}
@@ -422,16 +420,13 @@ static int hns3_nic_net_open(struct net_device *netdev)
 	ret = hns3_nic_net_up(netdev);
 	if (ret) {
 		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
-		netdev_err(netdev,
-			   "hns net up fail, ret=%d!\n", ret);
+		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
 		return ret;
 	}

 	kinfo = &h->kinfo;
-	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
-		netdev_set_prio_tc_map(netdev, i,
-				       kinfo->prio_tc[i]);
-	}
+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+		netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);

 	if (h->ae_algo->ops->enable_timer_task)
 		h->ae_algo->ops->enable_timer_task(priv->ae_handle, true);
@@ -635,7 +630,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 	if (l3.v4->version == 4)
 		l3.v4->check = 0;

-	/* tunnel packet.*/
+	/* tunnel packet */
 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
 					 SKB_GSO_GRE_CSUM |
 					 SKB_GSO_UDP_TUNNEL |
@@ -665,11 +660,11 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 			l3.v4->check = 0;
 	}

-	/* normal or tunnel packet*/
+	/* normal or tunnel packet */
 	l4_offset = l4.hdr - skb->data;
 	hdr_len = (l4.tcp->doff << 2) + l4_offset;

-	/* remove payload length from inner pseudo checksum when tso*/
+	/* remove payload length from inner pseudo checksum when tso */
 	l4_paylen = skb->len - l4_offset;
 	csum_replace_by_diff(&l4.tcp->check,
 			     (__force __wsum)htonl(l4_paylen));
@@ -757,7 +752,7 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 	l2_len = l3.hdr - skb->data;
 	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);

-	/* tunnel packet*/
+	/* tunnel packet */
 	if (skb->encapsulation) {
 		/* compute OL2 header size, defined in 2 Bytes */
 		ol2_len = l2_len;
@@ -769,9 +764,9 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
 			       ol3_len >> 2);

-		/* MAC in UDP, MAC in GRE (0x6558)*/
+		/* MAC in UDP, MAC in GRE (0x6558) */
 		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
-			/* switch MAC header ptr from outer to inner header.*/
+			/* switch MAC header ptr from outer to inner header */
 			l2_hdr = skb_inner_mac_header(skb);

 			/* compute OL4 header size, defined in 4 Bytes. */
@@ -893,9 +888,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 	l3.hdr = skb_network_header(skb);

-	/* define OL3 type and tunnel type(OL4).*/
+	/* define OL3 type and tunnel type(OL4) */
 	if (skb->encapsulation) {
-		/* define outer network header type.*/
+		/* define outer network header type */
 		if (skb->protocol == htons(ETH_P_IP)) {
 			if (skb_is_gso(skb))
 				hns3_set_field(*ol_type_vlan_len_msec,
@@ -911,7 +906,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 				       HNS3_OL3T_IPV6);
 		}

-		/* define tunnel type(OL4).*/
+		/* define tunnel type(OL4) */
 		switch (l4_proto) {
 		case IPPROTO_UDP:
 			hns3_set_field(*ol_type_vlan_len_msec,
@@ -1081,8 +1076,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		/* Set txbd */
 		desc->tx.ol_type_vlan_len_msec =
 			cpu_to_le32(ol_type_vlan_len_msec);
-		desc->tx.type_cs_vlan_tso_len =
-			cpu_to_le32(type_cs_vlan_tso);
+		desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
 		desc->tx.paylen = cpu_to_le32(paylen);
 		desc->tx.mss = cpu_to_le16(mss);
 		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
@@ -1094,7 +1088,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
 	}

-	if (unlikely(dma_mapping_error(ring->dev, dma))) {
+	if (unlikely(dma_mapping_error(dev, dma))) {
 		ring->stats.sw_err_cnt++;
 		return -ENOMEM;
 	}
@@ -1123,7 +1117,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc->tx.bdtp_fe_sc_vld_ra_ri =
 			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

-		/* move ring pointer to next.*/
+		/* move ring pointer to next */
 		ring_ptr_move_fw(ring, next_to_use);

 		desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -1673,7 +1667,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
-	struct hns3_enet_ring *tx_ring = NULL;
+	struct hns3_enet_ring *tx_ring;
 	int timeout_queue = 0;
 	int hw_head, hw_tail;
 	int i;
@@ -1816,8 +1810,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct hnae3_ae_dev *ae_dev;
 	int ret;

-	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
-			      GFP_KERNEL);
+	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
 	if (!ae_dev) {
 		ret = -ENOMEM;
 		return ret;
@@ -2017,7 +2010,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

 	if (pdev->revision > HNAE3_REVISION_ID_20) {
-		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 #ifdef NETIF_F_GRO_HW
 		netdev->features |= NETIF_F_GRO_HW;
 		netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2125,8 +2117,7 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 	int size = ring->desc_num * sizeof(ring->desc[0]);

 	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
-					 &ring->desc_dma_addr,
-					 GFP_KERNEL);
+					 &ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
@@ -2198,8 +2189,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
 	ring->desc_cb[i].reuse_flag = 0;
-	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
-		+ ring->desc_cb[i].page_offset);
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;
 }
@@ -2210,7 +2201,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
 		(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
 		(*bytes) += desc_cb->length;

-		/* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
+		/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
 		hns3_free_buffer_detach(ring, ring->next_to_clean);
 		ring_ptr_move_fw(ring, next_to_clean);
@@ -2292,8 +2283,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }

-static void
-hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
+static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+				      int cleand_count)
 {
 	struct hns3_desc_cb *desc_cb;
 	struct hns3_desc_cb res_cbs;
@@ -2332,48 +2323,30 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
 {
-	struct hns3_desc *desc;
-	int truesize, size;
-	int last_offset;
-	bool twobufs;
-
-	twobufs = ((PAGE_SIZE < 8192) &&
-		hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
-
-	desc = &ring->desc[ring->next_to_clean];
-	size = le16_to_cpu(desc->rx.size);
-	truesize = hnae3_buf_size(ring);
-
-	if (!twobufs)
-		last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
+	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+	int size = le16_to_cpu(desc->rx.size);
+	u32 truesize = hnae3_buf_size(ring);

 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);

-	/* Avoid re-using remote pages,flag default unreuse */
-	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
-		return;
-
-	if (twobufs) {
-		/* If we are only owner of page we can reuse it */
-		if (likely(page_count(desc_cb->priv) == 1)) {
-			/* Flip page offset to other buffer */
-			desc_cb->page_offset ^= truesize;
-
-			desc_cb->reuse_flag = 1;
-			/* bump ref count on page before it is given*/
-			get_page(desc_cb->priv);
-		}
+	/* Avoid re-using remote pages, or the stack is still using the page
+	 * when page buffer has wrap back, flag default unreuse
+	 */
+	if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
+	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
 		return;
-	}

 	/* Move offset up to the next cache line */
 	desc_cb->page_offset += truesize;

-	if (desc_cb->page_offset <= last_offset) {
+	if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
 		desc_cb->reuse_flag = 1;
-		/* Bump ref count on page before it is given*/
-		get_page(desc_cb->priv);
-	} else if (page_count(desc_cb->priv) == 1) {
-		desc_cb->reuse_flag = 1;
-		desc_cb->page_offset = 0;
+		/* Bump ref count on page before it is given */
 		get_page(desc_cb->priv);
 	}
 }
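
The rewritten hunk above folds the old two-buffer and multi-buffer paths into one reuse test. For orientation, a minimal standalone model of that decision (not driver code; the kernel helpers page_to_nid(), numa_mem_id() and page_count() are stubbed as parameters):

```c
#include <stdbool.h>
#include <stdint.h>

/* Model of the reuse check in hns3_nic_reuse_page() above: a page
 * is recycled only if it is local to this NUMA node and, once the
 * offset has wrapped back to 0, only if the network stack holds no
 * extra references to it.
 */
static bool rx_page_reusable(int page_nid, int local_nid,
			     int page_refcount, uint32_t page_offset)
{
	if (page_nid != local_nid)	/* remote page: never reuse */
		return false;
	if (page_offset == 0 && page_refcount > 1)
		return false;		/* wrapped, buffer still in flight */
	return true;
}
```

With the deleted twobufs branch gone, the single `page_offset + truesize <= hnae3_page_size(ring)` test covers both cases: in a two-buffer page, advancing by truesize is exactly the old "flip to the other half", and in larger pages it is the old "move to the next buffer" check without the precomputed last_offset.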
@@ -2525,7 +2498,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
 	memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

 	/* We can reuse buffer as-is, just make sure it is local */
-	if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+	if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
 		desc_cb->reuse_flag = 1;
 	else /* This page cannot be reused so discard it */
 		put_page(desc_cb->priv);
@@ -2648,8 +2621,7 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
 	 */
 	NAPI_GRO_CB(skb)->count = gro_count;

-	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
-				  HNS3_RXD_L3ID_S);
+	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
 	if (l3_type == HNS3_L3_TYPE_IPV4)
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 	else if (l3_type == HNS3_L3_TYPE_IPV6)
@@ -2795,8 +2767,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 	return 0;
 }

-int hns3_clean_rx_ring(
-		struct hns3_enet_ring *ring, int budget,
+int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
@@ -2851,8 +2822,7 @@ int hns3_clean_rx_ring(
 out:
 	/* Make all data has been write before submit */
 	if (clean_count + unused_count > 0)
-		hns3_nic_alloc_rx_buffers(ring,
-					  clean_count + unused_count);
+		hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);

 	return recv_pkts;
 }
@@ -3309,10 +3279,8 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

 		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
-			irq_set_affinity_notifier(tqp_vector->vector_irq,
-						  NULL);
-			irq_set_affinity_hint(tqp_vector->vector_irq,
-					      NULL);
+			irq_set_affinity_notifier(tqp_vector->vector_irq, NULL);
+			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
 			free_irq(priv->tqp_vector[i].vector_irq,
 				 &priv->tqp_vector[i]);
 			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3453,8 +3421,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 	if (ring->desc_num <= 0 || ring->buf_size <= 0)
 		return -EINVAL;

-	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
-				GFP_KERNEL);
+	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
+				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
 	if (!ring->desc_cb) {
 		ret = -ENOMEM;
 		goto out;
@@ -3475,7 +3443,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 out_with_desc:
 	hns3_free_desc(ring);
 out_with_desc_cb:
-	kfree(ring->desc_cb);
+	devm_kfree(ring_to_dev(ring), ring->desc_cb);
 	ring->desc_cb = NULL;
 out:
 	return ret;
@@ -3484,7 +3452,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 static void hns3_fini_ring(struct hns3_enet_ring *ring)
 {
 	hns3_free_desc(ring);
-	kfree(ring->desc_cb);
+	devm_kfree(ring_to_dev(ring), ring->desc_cb);
 	ring->desc_cb = NULL;
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
@@ -3525,8 +3493,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
 	struct hnae3_queue *q = ring->tqp;

 	if (!HNAE3_IS_TX_RING(ring)) {
-		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
-			       (u32)dma);
+		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
 			       (u32)((dma >> 31) >> 1));
@@ -3773,7 +3740,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	ret = hns3_client_start(handle);
 	if (ret) {
 		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
-		goto out_reg_netdev_fail;
+		goto out_client_start;
 	}

 	hns3_dcbnl_setup(handle);
@@ -3789,6 +3756,8 @@ static int hns3_client_init(struct hnae3_handle *handle)

 	return ret;

+out_client_start:
+	unregister_netdev(netdev);
 out_reg_netdev_fail:
 	hns3_uninit_phy(netdev);
 out_init_phy:
@@ -3958,8 +3927,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
 					    ret);
 				return ret;
 			}
-			hns3_replace_buffer(ring, ring->next_to_use,
-					    &res_cbs);
+			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
 		}
 		ring_ptr_move_fw(ring, next_to_use);
 	}
@@ -4130,7 +4098,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
 		ret = hns3_nic_net_open(kinfo->netdev);
 		if (ret) {
 			netdev_err(kinfo->netdev,
-				   "hns net up fail, ret=%d!\n", ret);
+				   "net up fail, ret=%d!\n", ret);
 			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
 			return ret;
 		}
@@ -4171,6 +4139,12 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 	if (ret)
 		goto err_uninit_vector;

+	ret = hns3_client_start(handle);
+	if (ret) {
+		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+		goto err_uninit_vector;
+	}
+
 	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

 	return 0;
......
@@ -398,7 +398,6 @@ struct hns3_enet_ring {
 	struct hns3_enet_ring *next;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	struct hnae3_queue *tqp;
-	char ring_name[HNS3_RING_NAME_LEN];
 	struct device *dev; /* will be used for DMA mapping of descriptors */

 	/* statistic */
@@ -408,9 +407,6 @@ struct hns3_enet_ring {
 	dma_addr_t desc_dma_addr;
 	u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
 	u16 desc_num;       /* total number of desc */
-	u16 max_desc_num_per_pkt;
-	u16 max_raw_data_sz_per_desc;
-	u16 max_pkt_size;
 	int next_to_use;    /* idx of next spare desc */

 	/* idx of lastest sent desc, the ring is empty when equal to
@@ -424,9 +420,6 @@ struct hns3_enet_ring {

 	u32 flag;          /* ring attribute */

-	int numa_node;
-	cpumask_t affinity_mask;
-
 	int pending_buf;
 	struct sk_buff *skb;
 	struct sk_buff *tail_skb;
@@ -634,7 +627,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 #define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
 		(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)

-#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
+#define ring_to_dev(ring) ((ring)->dev)

 #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
 					DMA_TO_DEVICE : DMA_FROM_DEVICE)
......
@@ -58,6 +58,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
 #define HNS3_NIC_LB_TEST_PKT_NUM	1
 #define HNS3_NIC_LB_TEST_RING_ID	0
 #define HNS3_NIC_LB_TEST_PACKET_SIZE	128
+#define HNS3_NIC_LB_SETUP_USEC		10000

 /* Nic loopback test err */
 #define HNS3_NIC_LB_TEST_NO_MEM_ERR	1
@@ -115,7 +116,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
 		return ret;

 	ret = hns3_lp_setup(ndev, loop_mode, true);
-	usleep_range(10000, 20000);
+	usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);

 	return ret;
 }
@@ -130,7 +131,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
 		return ret;
 	}

-	usleep_range(10000, 20000);
+	usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);

 	return 0;
 }
@@ -152,6 +153,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
 	packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);

 	memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+
+	/* The dst mac addr of loopback packet is the same as the host'
+	 * mac addr, the SSU component may loop back the packet to host
+	 * before the packet reaches mac or serdes, which will defect
+	 * the purpose of mac or serdes selftest.
+	 */
 	ethh->h_dest[5] += 0x1f;
 	eth_zero_addr(ethh->h_source);
 	ethh->h_proto = htons(ETH_P_ARP);
......
@@ -209,12 +209,14 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw,
 			retval = -EPERM;
 		else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
 			retval = -EOPNOTSUPP;
+		else if (desc_ret == HCLGE_CMD_QUEUE_ILLEGAL)
+			retval = -ENXIO;
 		else
 			retval = -EIO;
 		hw->cmq.last_status = desc_ret;
 		(*ntc)++;
 		handle++;
-		if (*ntc == hw->cmq.csq.desc_num)
+		if (*ntc >= hw->cmq.csq.desc_num)
 			*ntc = 0;
 	}
 	return retval;
@@ -257,7 +259,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
 		*desc_to_use = desc[handle];
 		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
 			hw->cmq.csq.next_to_use = 0;
 		handle++;
 	}
@@ -393,6 +395,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
 	return 0;
 }

+static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
+{
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
+}
+
 static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
 {
 	spin_lock(&ring->lock);
@@ -405,3 +421,15 @@ void hclge_destroy_cmd_queue(struct hclge_hw *hw)
 	hclge_destroy_queue(&hw->cmq.csq);
 	hclge_destroy_queue(&hw->cmq.crq);
 }
+
+void hclge_cmd_uninit(struct hclge_dev *hdev)
+{
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock_bh(&hdev->hw.cmq.crq.lock);
+	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+	hclge_cmd_uninit_regs(&hdev->hw);
+	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+	hclge_destroy_cmd_queue(&hdev->hw);
+}
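
For reference, the firmware-to-errno translation that the first hunk in this file extends can be modeled as a standalone table. The enum values mirror the hclge_cmd.h hunk below; the SUCCESS and NO_AUTH arms are inferred from the clipped context above, so treat them as assumptions rather than quoted driver code:

```c
#include <errno.h>

/* Model of the desc_ret mapping in hclge_cmd_check_retval().
 * The SUCCESS -> 0 and NO_AUTH -> -EPERM pairings are assumed
 * from truncated context; QUEUE_ILLEGAL -> -ENXIO is the new arm.
 */
enum fw_ret {
	FW_EXEC_SUCCESS  = 0,
	FW_NO_AUTH       = 1,
	FW_NOT_SUPPORTED = 2,
	FW_QUEUE_FULL    = 3,
	FW_QUEUE_ILLEGAL = 10,	/* new in this patch */
};

static int fw_ret_to_errno(enum fw_ret desc_ret)
{
	switch (desc_ret) {
	case FW_EXEC_SUCCESS:	return 0;
	case FW_NO_AUTH:	return -EPERM;
	case FW_NOT_SUPPORTED:	return -EOPNOTSUPP;
	case FW_QUEUE_ILLEGAL:	return -ENXIO;	/* added by this patch */
	default:		return -EIO;
	}
}
```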
@@ -41,6 +41,7 @@ enum hclge_cmd_return_status {
 	HCLGE_CMD_NO_AUTH	= 1,
 	HCLGE_CMD_NOT_SUPPORTED	= 2,
 	HCLGE_CMD_QUEUE_FULL	= 3,
+	HCLGE_CMD_QUEUE_ILLEGAL	= 10,
 };

 enum hclge_cmd_status {
@@ -318,16 +319,16 @@ struct hclge_ctrl_vector_chain_cmd {
 	u8 rsv;
 };

-#define HCLGE_TC_NUM		8
+#define HCLGE_MAX_TC_NUM	8
 #define HCLGE_TC0_PRI_BUF_EN_B	15 /* Bit 15 indicate enable or not */
 #define HCLGE_BUF_UNIT_S	7  /* Buf size is united by 128 bytes */
 struct hclge_tx_buff_alloc_cmd {
-	__le16 tx_pkt_buff[HCLGE_TC_NUM];
+	__le16 tx_pkt_buff[HCLGE_MAX_TC_NUM];
 	u8 tx_buff_rsv[8];
 };

 struct hclge_rx_priv_buff_cmd {
-	__le16 buf_num[HCLGE_TC_NUM];
+	__le16 buf_num[HCLGE_MAX_TC_NUM];
 	__le16 shared_buf;
 	u8 rsv[6];
 };
@@ -373,7 +374,6 @@ struct hclge_priv_buf {
 	u32 enable;	/* Enable TC private buffer or not */
 };

-#define HCLGE_MAX_TC_NUM	8
 struct hclge_shared_buf {
 	struct hclge_waterline self;
 	struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM];
@@ -620,6 +620,11 @@ enum hclge_mac_vlan_tbl_opcode {
 	HCLGE_MAC_VLAN_LKUP,	/* Lookup a entry through mac_vlan key */
 };

+enum hclge_mac_vlan_add_resp_code {
+	HCLGE_ADD_UC_OVERFLOW = 2,	/* ADD failed for UC overflow */
+	HCLGE_ADD_MC_OVERFLOW,		/* ADD failed for MC overflow */
+};
+
 #define HCLGE_MAC_VLAN_BIT0_EN_B	0
 #define HCLGE_MAC_VLAN_BIT1_EN_B	1
 #define HCLGE_MAC_EPORT_SW_EN_B		12
@@ -732,7 +737,9 @@ struct hclge_mac_ethertype_idx_rd_cmd {
 struct hclge_vlan_filter_ctrl_cmd {
 	u8 vlan_type;
 	u8 vlan_fe;
-	u8 rsv[22];
+	u8 rsv1[2];
+	u8 vf_id;
+	u8 rsv2[19];
 };

 struct hclge_vlan_filter_pf_cfg_cmd {
@@ -996,6 +1003,6 @@ enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
 enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
 					  struct hclge_desc *desc);
-void hclge_destroy_cmd_queue(struct hclge_hw *hw);
+void hclge_cmd_uninit(struct hclge_dev *hdev);
 int hclge_cmd_queue_init(struct hclge_dev *hdev);
 #endif
@@ -42,6 +42,8 @@ static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
 {
 	struct hclge_desc desc[4];
+	int entries_per_desc;
+	int index;
 	int ret;

 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
@@ -59,7 +61,9 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
 		return ret;
 	}

-	return (int)desc[offset / 6].data[offset % 6];
+	entries_per_desc = ARRAY_SIZE(desc[0].data);
+	index = offset % entries_per_desc;
+	return (int)desc[offset / entries_per_desc].data[index];
 }

 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -96,12 +100,13 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
 {
 	struct hclge_desc *desc_src;
 	struct hclge_desc *desc;
+	int entries_per_desc;
 	int bd_num, buf_len;
 	int ret, i;
 	int index;
 	int max;

-	ret = kstrtouint(cmd_buf, 10, &index);
+	ret = kstrtouint(cmd_buf, 0, &index);
 	index = (ret != 0) ? 0 : index;

 	bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
@@ -125,14 +130,18 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
 		return;
 	}

-	max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num;
+	entries_per_desc = ARRAY_SIZE(desc->data);
+	max = (bd_num * entries_per_desc) <= msg_num ?
+	      (bd_num * entries_per_desc) : msg_num;

 	desc = desc_src;
 	for (i = 0; i < max; i++) {
-		(((i / 6) > 0) && ((i % 6) == 0)) ? desc++ : desc;
+		((i > 0) && ((i % entries_per_desc) == 0)) ? desc++ : desc;
 		if (dfx_message->flag)
 			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
-				 dfx_message->message, desc->data[i % 6]);
+				 dfx_message->message,
+				 desc->data[i % entries_per_desc]);

 		dfx_message++;
 	}
@@ -244,92 +253,92 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
 {
 	int msg_num;

-	if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
+	if (strncmp(cmd_buf, "bios common", 11) == 0) {
 		msg_num = sizeof(hclge_dbg_bios_common_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
-					  &cmd_buf[21], msg_num,
-					  HCLGE_DBG_DFX_BIOS_OFFSET,
+					  &cmd_buf[sizeof("bios common")],
+					  msg_num, HCLGE_DBG_DFX_BIOS_OFFSET,
 					  HCLGE_OPC_DFX_BIOS_COMMON_REG);
-	} else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
+	} else if (strncmp(cmd_buf, "ssu", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_ssu_reg_0) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_0_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_0);

 		msg_num = sizeof(hclge_dbg_ssu_reg_1) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_1_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_1);

 		msg_num = sizeof(hclge_dbg_ssu_reg_2) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_2_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_2);
-	} else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
+	} else if (strncmp(cmd_buf, "igu egu", 7) == 0) {
 		msg_num = sizeof(hclge_dbg_igu_egu_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
-					  &cmd_buf[17], msg_num,
+					  &cmd_buf[sizeof("igu egu")], msg_num,
 					  HCLGE_DBG_DFX_IGU_OFFSET,
 					  HCLGE_OPC_DFX_IGU_EGU_REG);
-	} else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rpu", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rpu_reg_0) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rpu")], msg_num,
 					  HCLGE_DBG_DFX_RPU_0_OFFSET,
 					  HCLGE_OPC_DFX_RPU_REG_0);

 		msg_num = sizeof(hclge_dbg_rpu_reg_1) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rpu")], msg_num,
 					  HCLGE_DBG_DFX_RPU_1_OFFSET,
 					  HCLGE_OPC_DFX_RPU_REG_1);
-	} else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
+	} else if (strncmp(cmd_buf, "ncsi", 4) == 0) {
 		msg_num = sizeof(hclge_dbg_ncsi_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
-					  &cmd_buf[14], msg_num,
+					  &cmd_buf[sizeof("ncsi")], msg_num,
 					  HCLGE_DBG_DFX_NCSI_OFFSET,
 					  HCLGE_OPC_DFX_NCSI_REG);
-	} else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rtc", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rtc_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rtc")], msg_num,
 					  HCLGE_DBG_DFX_RTC_OFFSET,
 					  HCLGE_OPC_DFX_RTC_REG);
-	} else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
+	} else if (strncmp(cmd_buf, "ppp", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_ppp_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ppp")], msg_num,
 					  HCLGE_DBG_DFX_PPP_OFFSET,
 					  HCLGE_OPC_DFX_PPP_REG);
-	} else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rcb", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rcb_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rcb")], msg_num,
 					  HCLGE_DBG_DFX_RCB_OFFSET,
 					  HCLGE_OPC_DFX_RCB_REG);
-	} else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
+	} else if (strncmp(cmd_buf, "tqp", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_tqp_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("tqp")], msg_num,
 					  HCLGE_DBG_DFX_TQP_OFFSET,
 					  HCLGE_OPC_DFX_TQP_REG);
-	} else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
-		hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
+	} else if (strncmp(cmd_buf, "dcb", 3) == 0) {
+		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
 	} else {
 		dev_info(&hdev->pdev->dev, "unknown command\n");
 		return;
...@@ -601,7 +610,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf) ...@@ -601,7 +610,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
int pri_id, ret; int pri_id, ret;
u32 i; u32 i;
ret = kstrtouint(&cmd_buf[12], 10, &queue_id); ret = kstrtouint(cmd_buf, 0, &queue_id);
queue_id = (ret != 0) ? 0 : queue_id; queue_id = (ret != 0) ? 0 : queue_id;
cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
...@@ -772,7 +781,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) ...@@ -772,7 +781,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM; i++) for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
tx_buf_cmd->tx_pkt_buff[i]); tx_buf_cmd->tx_pkt_buff[i]);
...@@ -784,7 +793,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) ...@@ -784,7 +793,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "\n"); dev_info(&hdev->pdev->dev, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM; i++) for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
rx_buf_cmd->buf_num[i]); rx_buf_cmd->buf_num[i]);
...@@ -893,8 +902,8 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev) ...@@ -893,8 +902,8 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
mc_tbl_idx = 0; mc_tbl_idx = 0;
for (i = 0; i < HCLGE_DBG_MAC_TBL_MAX; i++) { for (i = 0; i < HCLGE_DBG_MAC_TBL_MAX; i++) {
/* Prevent long-term occupation of the command channel. */ /* Prevent long-term occupation of the command channel. */
if ((i % 100) == 0) if ((i % HCLGE_DBG_SCAN_STEP) == 0)
msleep(100); msleep(HCLGE_DBG_PAUSE_TIME);
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_PPP_MAC_VLAN_IDX_RD, hclge_cmd_setup_basic_desc(&desc[0], HCLGE_PPP_MAC_VLAN_IDX_RD,
true); true);
...@@ -919,6 +928,19 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev) ...@@ -919,6 +928,19 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
if (mac_rd_cmd->resp_code) if (mac_rd_cmd->resp_code)
continue; continue;
if (mac_rd_cmd->entry_type == HCLGE_DBG_MAC_MC_TBL) {
mc_mac_tbl[mc_tbl_idx].index = i;
memcpy(mc_mac_tbl[mc_tbl_idx].mac_add,
mac_rd_cmd->mac_add, 6);
memcpy(mc_mac_tbl[mc_tbl_idx].mg_vf_mb,
desc[1].data, 24);
memcpy(&mc_mac_tbl[mc_tbl_idx].mg_vf_mb[24],
desc[2].data, 8);
mc_tbl_idx++;
continue;
}
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN, snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"|%04d |%02x:%02x:%02x:%02x:%02x:%02x |", "|%04d |%02x:%02x:%02x:%02x:%02x:%02x |",
...@@ -941,17 +963,6 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev) ...@@ -941,17 +963,6 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
mac_rd_cmd->egress_port & HCLGE_DBG_MAC_TBL_E_PORT); mac_rd_cmd->egress_port & HCLGE_DBG_MAC_TBL_E_PORT);
dev_info(&hdev->pdev->dev, "%s", printf_buf); dev_info(&hdev->pdev->dev, "%s", printf_buf);
if (mac_rd_cmd->entry_type == HCLGE_DBG_MAC_MC_TBL) {
mc_mac_tbl[mc_tbl_idx].index = i;
memcpy(mc_mac_tbl[mc_tbl_idx].mac_add,
mac_rd_cmd->mac_add, 6);
memcpy(mc_mac_tbl[mc_tbl_idx].mg_vf_mb,
desc[1].data, 24);
memcpy(&mc_mac_tbl[mc_tbl_idx].mg_vf_mb[24],
desc[2].data, 8);
mc_tbl_idx++;
}
} }
if (mc_tbl_idx > 0) { if (mc_tbl_idx > 0) {
...@@ -1028,7 +1039,7 @@ static void hclge_dbg_dump_port_vlan_table(struct hclge_dev *hdev) ...@@ -1028,7 +1039,7 @@ static void hclge_dbg_dump_port_vlan_table(struct hclge_dev *hdev)
u32 vlan_id; u32 vlan_id;
int ret; int ret;
vlan_len = HCLGE_DBG_VLAN_ID_MAX / 8; vlan_len = HCLGE_DBG_VLAN_ID_MAX / HCLGE_VLAN_BYTE_SIZE;
vlan_bitmap = kzalloc(vlan_len, GFP_KERNEL); vlan_bitmap = kzalloc(vlan_len, GFP_KERNEL);
if (!vlan_bitmap) { if (!vlan_bitmap) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
...@@ -1038,15 +1049,15 @@ static void hclge_dbg_dump_port_vlan_table(struct hclge_dev *hdev) ...@@ -1038,15 +1049,15 @@ static void hclge_dbg_dump_port_vlan_table(struct hclge_dev *hdev)
for (vlan_id = 0; vlan_id < HCLGE_DBG_VLAN_ID_MAX; vlan_id++) { for (vlan_id = 0; vlan_id < HCLGE_DBG_VLAN_ID_MAX; vlan_id++) {
/* Prevent long-term occupation of the command channel. */ /* Prevent long-term occupation of the command channel. */
if ((vlan_id % 100) == 0) if ((vlan_id % HCLGE_DBG_SCAN_STEP) == 0)
msleep(100); msleep(HCLGE_DBG_PAUSE_TIME);
hclge_cmd_setup_basic_desc(&desc, hclge_cmd_setup_basic_desc(&desc,
HCLGE_OPC_VLAN_FILTER_PF_CFG, true); HCLGE_OPC_VLAN_FILTER_PF_CFG, true);
vlan_offset = vlan_id / 160; vlan_offset = vlan_id / HCLGE_VLAN_ID_B;
vlan_byte = (vlan_id % 160) / 8; vlan_byte = (vlan_id % HCLGE_VLAN_ID_B) / HCLGE_VLAN_BYTE_SIZE;
vlan_byte_val = 1 << (vlan_id % 8); vlan_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset; req->vlan_offset = vlan_offset;
...@@ -1086,7 +1097,7 @@ static void hclge_dbg_dump_vf_vlan_table(struct hclge_dev *hdev, char *cmd_buf) ...@@ -1086,7 +1097,7 @@ static void hclge_dbg_dump_vf_vlan_table(struct hclge_dev *hdev, char *cmd_buf)
u16 vf_id; u16 vf_id;
int ret; int ret;
ret = kstrtou16(&cmd_buf[17], 10, &vf_id); ret = kstrtou16(cmd_buf, 0, &vf_id);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"vf id failed. vf id max: %d\n", hdev->num_alloc_vfs); "vf id failed. vf id max: %d\n", hdev->num_alloc_vfs);
@@ -1103,8 +1114,8 @@ static void hclge_dbg_dump_vf_vlan_table(struct hclge_dev *hdev, char *cmd_buf)
	for (vlan_id = 0; vlan_id < HCLGE_DBG_VLAN_ID_MAX; vlan_id++) {
		/* Prevent long-term occupation of the command channel. */
-		if ((vlan_id % 100) == 0)
-			msleep(100);
+		if ((vlan_id % HCLGE_DBG_SCAN_STEP) == 0)
+			msleep(HCLGE_DBG_PAUSE_TIME);

		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_VLAN_FILTER_VF_CFG, true);
@@ -1237,14 +1248,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
		 sel_x ? "x" : "y", loc);

+	/* tcam_data0 ~ tcam_data1 */
	req = (u32 *)req1->tcam_data;
	for (i = 0; i < 2; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

+	/* tcam_data2 ~ tcam_data7 */
	req = (u32 *)req2->tcam_data;
	for (i = 0; i < 6; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

+	/* tcam_data8 ~ tcam_data12 */
	req = (u32 *)req3->tcam_data;
	for (i = 0; i < 5; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
@@ -1272,7 +1286,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
	} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
		hclge_dbg_dump_tc(hdev);
	} else if (strncmp(cmd_buf, "dump tm map", 11) == 0) {
-		hclge_dbg_dump_tm_map(hdev, cmd_buf);
+		hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof("dump tm map")]);
	} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
		hclge_dbg_dump_tm(hdev);
	} else if (strncmp(cmd_buf, "dump checksum", 13) == 0) {
@@ -1288,11 +1302,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
	} else if (strncmp(cmd_buf, "dump port vlan tbl", 18) == 0) {
		hclge_dbg_dump_port_vlan_table(hdev);
	} else if (strncmp(cmd_buf, "dump vf vlan tbl", 16) == 0) {
-		hclge_dbg_dump_vf_vlan_table(hdev, cmd_buf);
+		int len = sizeof("dump vf vlan tbl");
+
+		hclge_dbg_dump_vf_vlan_table(hdev, &cmd_buf[len]);
	} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
		hclge_dbg_dump_mng_table(hdev);
	} else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
-		hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+		hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof("dump reg")]);
	} else {
		dev_info(&hdev->pdev->dev, "unknown command\n");
		return -EINVAL;
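The sizeof-based indexing above works because sizeof of a string literal counts the terminating NUL, so the resulting pointer lands one past the command prefix and its trailing space. A tiny standalone check, not driver code:

#include <stdio.h>

int main(void)
{
	char cmd_buf[] = "dump reg dcb 17";

	/* sizeof("dump reg") is 9: 8 characters plus '\0', so the '\0'
	 * slot lines up with the separating space and &cmd_buf[9] points
	 * at the first argument character.
	 */
	printf("argument: \"%s\"\n", &cmd_buf[sizeof("dump reg")]);
	return 0;
}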
...
@@ -43,6 +43,9 @@
#define HCLGE_DBG_DFX_SSU_2_OFFSET 12

+#define HCLGE_DBG_SCAN_STEP 100
+#define HCLGE_DBG_PAUSE_TIME 50
+
#pragma pack(1)

struct hclge_checksum_cmd {
...
@@ -80,7 +80,7 @@ const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
-	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" },
+	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
@@ -475,19 +475,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
				 enum hclge_err_int_type int_type)
{
	struct device *dev = &hdev->pdev->dev;
-	int num = 1;
+	int desc_num = 1;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	if (flag) {
		desc[0].flag |= cpu_to_le16(flag);
		hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
-		num = 2;
+		desc_num = 2;
	}
	if (w_num)
		desc[0].data[w_num] = cpu_to_le32(int_type);

-	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+	ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
	if (ret)
		dev_err(dev, "query error cmd failed (%d)\n", ret);
@@ -718,7 +718,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
-	int num = 1;
+	int desc_num = 1;
	int ret;

	/* configure PPU error interrupts */
@@ -737,7 +737,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
		desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
		desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
		desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
-		num = 2;
+		desc_num = 2;
	} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
		hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
@@ -755,7 +755,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
		return -EINVAL;
	}

-	ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+	ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);

	return ret;
}
@@ -940,8 +940,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
	desc_data = (__le32 *)&desc[3];
	status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
	if (status)
-		hclge_log_error(dev, "IGU_INT_STS",
-				&hclge_igu_int[0], status);
+		hclge_log_error(dev, "IGU_INT_STS", &hclge_igu_int[0], status);

	/* log PPP(Programmable Packet Process) errors */
	desc_data = (__le32 *)&desc[4];
@@ -1167,8 +1166,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
	int ret;

	/* read overflow error status */
-	ret = hclge_cmd_query_error(hdev, &desc[0],
-				    HCLGE_ROCEE_PF_RAS_INT_CMD,
+	ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
				    0, 0, 0);
	if (ret) {
		dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
@@ -1207,10 +1205,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
	return 0;
}

-static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
+static enum hnae3_reset_type
+hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
{
-	enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET;
-	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+	enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc[2];
	unsigned int status;
@@ -1223,17 +1221,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
	if (ret) {
		dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
		/* reset everything for now */
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
-		return ret;
+		return HNAE3_GLOBAL_RESET;
	}

	status = le32_to_cpu(desc[0].data[0]);

-	if (status & HCLGE_ROCEE_RERR_INT_MASK)
+	if (status & HCLGE_ROCEE_RERR_INT_MASK) {
		dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+		return HNAE3_FUNC_RESET;
+	}

-	if (status & HCLGE_ROCEE_BERR_INT_MASK)
+	if (status & HCLGE_ROCEE_BERR_INT_MASK) {
		dev_warn(dev, "ROCEE RAS AXI bresp error\n");
+		return HNAE3_FUNC_RESET;
+	}

	if (status & HCLGE_ROCEE_ECC_INT_MASK) {
		dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
@@ -1245,9 +1246,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
		if (ret) {
			dev_err(dev, "failed(%d) to process ovf error\n", ret);
			/* reset everything for now */
-			HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
-			return ret;
+			return HNAE3_GLOBAL_RESET;
		}
+		reset_type = HNAE3_FUNC_RESET;
	}

	/* clear error status */
@@ -1256,12 +1257,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
	if (ret) {
		dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
		/* reset everything for now */
-		reset_type = HNAE3_GLOBAL_RESET;
+		return HNAE3_GLOBAL_RESET;
	}

-	HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
-	return ret;
+	return reset_type;
}
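The reshaped function above now classifies the failure and returns a reset level instead of requesting the reset itself; the caller decides what to do with it. A minimal sketch of that control-flow shape, with hypothetical names rather than the driver's API:

#include <stdio.h>

enum reset_type { RESET_NONE, RESET_FUNC, RESET_GLOBAL };

/* the handler only reports the needed reset level, no side effects */
static enum reset_type classify(int query_failed, int axi_err, int ovf_err)
{
	if (query_failed)
		return RESET_GLOBAL; /* cannot even read status: escalate */
	if (axi_err)
		return RESET_FUNC;
	if (ovf_err)
		return RESET_FUNC;
	return RESET_NONE;
}

int main(void)
{
	enum reset_type t = classify(0, 1, 0);

	if (t != RESET_NONE) /* the caller applies the request */
		printf("request reset level %d\n", t);
	return 0;
}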
static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
@@ -1293,13 +1292,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
+	enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    hdev->pdev->revision < 0x21)
-		return HNAE3_NONE_RESET;
+		return reset_type;
+
+	reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
+	if (reset_type != HNAE3_NONE_RESET)
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);

-	return hclge_log_and_clear_rocee_ras_error(hdev);
+	return reset_type;
}

static const struct hclge_hw_blk hw_blk[] = {
@@ -1426,8 +1430,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
	if (ret) {
-		dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
-			ret);
+		dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
		goto msi_error;
	}
@@ -1446,9 +1449,8 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
	status = le32_to_cpu(*(desc_data + 2)) &
			HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
	if (status) {
-		dev_warn(dev,
-			 "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n",
-			 status);
+		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+				&hclge_ppu_mpf_abnormal_int_st2[0], status);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}
@@ -1458,8 +1460,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
	if (ret) {
-		dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
-			ret);
+		dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
		goto msi_error;
	}
@@ -1472,8 +1473,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
	if (ret) {
-		dev_err(dev, "query all pf msix int cmd failed (%d)\n",
-			ret);
+		dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
		goto msi_error;
	}
@@ -1506,8 +1506,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
	if (ret) {
-		dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
-			ret);
+		dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret);
		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
	}
...
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "kcompat.h"
#include "hclge_cmd.h"
@@ -28,6 +29,8 @@
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT 256
+#define HCLGE_BUF_MUL_BY 2
+#define HCLGE_BUF_DIV_BY 2
#define HCLGE_RESET_MAX_FAIL_CNT 5

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
@@ -383,8 +386,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
-		hclge_cmd_setup_basic_desc(&desc[0],
-					   HCLGE_OPC_QUERY_RX_STATUS,
+		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -446,6 +448,7 @@ static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

+	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * (2);
}
@@ -590,8 +593,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
	return count;
}

-static void hclge_get_strings(struct hnae3_handle *handle,
-			      u32 stringset,
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
@@ -599,21 +601,17 @@ static void hclge_get_strings(struct hnae3_handle *handle,
	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
-		p = hclge_comm_get_strings(stringset,
-					   g_mac_stats_string,
-					   size,
-					   p);
+		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
-			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_LOOP_APP],
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
-			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
@@ -624,8 +622,7 @@ static void hclge_get_strings(struct hnae3_handle *handle,
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
-			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
@@ -638,10 +635,8 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
	struct hclge_dev *hdev = vport->back;
	u64 *p;

-	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
-				 g_mac_stats_string,
-				 ARRAY_SIZE(g_mac_stats_string),
-				 data);
+	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
@@ -662,6 +657,8 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
static int hclge_query_function_status(struct hclge_dev *hdev)
{
+#define HCLGE_QUERY_MAX_CNT	5
+
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
@@ -674,9 +671,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
-				"query function status failed %d.\n",
-				ret);
+				"query function status failed %d.\n", ret);
			return ret;
		}
@@ -684,7 +679,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
-	} while (timeout++ < 5);
+	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);
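The bounded retry above polls the firmware a fixed number of times before giving up. A small userspace sketch of the same do/while shape; the query function here is a hypothetical stand-in:

#include <stdio.h>

#define QUERY_MAX_CNT 5 /* mirrors HCLGE_QUERY_MAX_CNT above */

/* hypothetical stand-in for the firmware status query */
static int pf_state_ready(int attempt)
{
	return attempt == 3; /* pretend the PF becomes ready on try 3 */
}

int main(void)
{
	int timeout = 0;
	int ready;

	do {
		ready = pf_state_ready(timeout);
		if (ready)
			break;
		/* the driver sleeps 1-2 ms here via usleep_range() */
	} while (timeout++ < QUERY_MAX_CNT);

	printf("ready=%d after %d tries\n", ready, timeout);
	return 0;
}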
@@ -788,44 +783,44 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+				 supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+				 supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+				 supported);

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+				 supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+				 supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+				 supported);

-	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
#else
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+				 supported);

-	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
#endif
}
@@ -839,22 +834,24 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
-		set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
-		set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				 supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
-		set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
-		set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

-	set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
-	set_bit(SUPPORTED_Asym_Pause, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
@@ -980,6 +977,22 @@ static int hclge_get_cap(struct hclge_dev *hdev)
	return ret;
}

+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC	64
+#define HCLGE_MIN_RX_DESC	64
+
+	if (is_kdump_kernel()) {
+		dev_info(&hdev->pdev->dev,
+			 "Running kdump kernel. Using minimal resources\n");
+
+		/* minimal queue pairs equals to the number of vports */
+		hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+		hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+		hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+	}
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
@@ -1039,6 +1052,8 @@ static int hclge_configure(struct hclge_dev *hdev)
	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

+	hclge_init_kdump_kernel_config(hdev);
+
	return ret;
}
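The kdump path above shrinks the queue and descriptor footprint to the minimum a crash kernel needs: one queue pair per vport (vmdq vports plus VFs plus the PF itself) and 64-entry rings. A standalone sketch of the same sizing rule; the struct and field names here are illustrative, not the driver's:

#include <stdio.h>

#define MIN_TX_DESC 64
#define MIN_RX_DESC 64

struct dev_cfg {
	int num_vmdq_vport, num_req_vfs;
	int num_tqps, num_tx_desc, num_rx_desc;
};

/* apply the same shrink the patch performs when is_kdump_kernel() is true */
static void init_kdump_config(struct dev_cfg *c, int is_kdump)
{
	if (!is_kdump)
		return;
	c->num_tqps = c->num_vmdq_vport + c->num_req_vfs + 1;
	c->num_tx_desc = MIN_TX_DESC;
	c->num_rx_desc = MIN_RX_DESC;
}

int main(void)
{
	struct dev_cfg c = { .num_vmdq_vport = 0, .num_req_vfs = 4,
			     .num_tqps = 16, .num_tx_desc = 1024,
			     .num_rx_desc = 1024 };

	init_kdump_config(&c, 1);
	printf("tqps=%d tx=%d rx=%d\n", c.num_tqps, c.num_tx_desc, c.num_rx_desc);
	return 0;
}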
@@ -1301,6 +1316,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
+		INIT_LIST_HEAD(&vport->uc_mac_list);
+		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1333,7 +1350,7 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);

-	for (i = 0; i < HCLGE_TC_NUM; i++) {
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
@@ -1370,17 +1387,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev)
	return cnt;
}

-static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
-{
-	int i, cnt = 0;
-
-	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
-		if (hdev->hw_tc_map & BIT(i) &&
-		    hdev->tm_info.hw_pfc_map & BIT(i))
-			cnt++;
-	return cnt;
-}
-
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
@@ -1445,24 +1451,21 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
-	int tc_num, pfc_enable_num;
+	int tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

-	tc_num = hclge_get_tc_num(hdev);
-	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
-		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
+		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
+					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

-	shared_buf_tc = pfc_enable_num * aligned_mps +
-			(tc_num - pfc_enable_num) * aligned_mps / 2 +
-			aligned_mps;
+	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);
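For the buffer sizing above, a quick userspace check of the simplified shared_buf_tc/shared_std math with sample numbers; roundup_to behaves like the kernel's roundup macro and all values are illustrative:

#include <stdio.h>

#define BUF_SIZE_UNIT 256
#define BUF_MUL_BY 2

static unsigned int roundup_to(unsigned int v, unsigned int unit)
{
	return (v + unit - 1) / unit * unit;
}

int main(void)
{
	unsigned int mps = 1500, dv_buf_size = 6144, tc_num = 4;
	unsigned int aligned_mps = roundup_to(mps, BUF_SIZE_UNIT); /* 1536 */
	unsigned int shared_buf_min = BUF_MUL_BY * aligned_mps + dv_buf_size;
	unsigned int shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	unsigned int shared_std = roundup_to(shared_buf_min > shared_buf_tc ?
					     shared_buf_min : shared_buf_tc,
					     BUF_SIZE_UNIT);

	/* min = 9216, tc = 7680 -> shared_std = 9216 */
	printf("shared_std=%u\n", shared_std);
	return 0;
}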
@@ -1475,23 +1478,20 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
-			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
+				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
				HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low =
-			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+			roundup(aligned_mps / HCLGE_BUF_DIV_BY,
+				HCLGE_BUF_SIZE_UNIT);
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-		if ((hdev->hw_tc_map & BIT(i)) &&
-		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
		buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
-			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
-		} else {
-			buf_alloc->s_buf.tc_thrd[i].low = 0;
-			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
-		}
+		buf_alloc->s_buf.tc_thrd[i].high = HCLGE_BUF_MUL_BY *
+						   aligned_mps;
	}

	return true;
@@ -1544,12 +1544,13 @@ static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
-			priv->wl.low = max ? aligned_mps : 256;
+			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
-			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
+			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
+					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
@@ -1932,7 +1933,6 @@ static int hclge_init_msi(struct hclge_dev *hdev)
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
-
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;
@@ -2195,7 +2195,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
-	struct hclge_sfp_speed_cmd *resp = NULL;
+	struct hclge_sfp_speed_cmd *resp;
	struct hclge_desc desc;
	int ret = 0;
@@ -2488,8 +2488,7 @@ int hclge_notify_client(struct hclge_dev *hdev,
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

-	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
-	    !client)
+	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
@@ -2634,7 +2633,7 @@ int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
			return ret;
	}

-	if (!reset)
+	if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		continue;

	/* Inform VF to process the reset.
@@ -3120,6 +3119,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

+		/* vf keeps sending alive msg to pf per 2s, if pf doesn't
+		 * receive a vf's alive msg for 8s, regards the vf is offline
+		 */
		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
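The 2s/8s aliveness rule in the new comment boils down to a wraparound-safe timestamp comparison. A sketch with an assumed tick rate; time_after mirrors the kernel macro's definition:

#include <stdio.h>

#define HZ 100 /* assumed tick rate for the sketch */

/* same definition the kernel uses, safe across counter wraparound */
static int time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long jiffies = 1000;
	unsigned long last_active = 100; /* vf last seen at tick 100 */

	/* alive msg arrives every 2s; declare offline after 8s of silence */
	if (time_after(jiffies, last_active + 8 * HZ))
		printf("vf considered offline\n");
	return 0;
}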
@@ -3236,28 +3238,30 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
-	int key_offset;
+	int key_offset = 0;
+	int key_counts;
	int key_size;
	int ret;

+	key_counts = HCLGE_RSS_KEY_SIZE;
	req = (struct hclge_rss_config_cmd *)desc.data;

-	for (key_offset = 0; key_offset < 3; key_offset++) {
+	while (key_counts) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

-		if (key_offset == 2)
-			key_size =
-				HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
-		else
+		if (key_counts >= HCLGE_RSS_HASH_KEY_NUM)
			key_size = HCLGE_RSS_HASH_KEY_NUM;
+		else
+			key_size = key_counts;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

+		key_counts -= key_size;
+		key_offset++;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
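The rewritten loop above walks the RSS key in per-command chunks instead of hard-coding three iterations: full chunks first, then whatever remains. A standalone sketch, assuming a 40-byte key and 16 bytes carried per command (plausible HCLGE values, but treat them as assumptions here):

#include <stdio.h>
#include <string.h>

#define RSS_KEY_SIZE 40 /* assumed total key length */
#define HASH_KEY_NUM 16 /* assumed bytes carried per command */

int main(void)
{
	unsigned char key[RSS_KEY_SIZE];
	unsigned char cmd_buf[HASH_KEY_NUM];
	int key_counts = RSS_KEY_SIZE;
	int key_offset = 0;

	memset(key, 0xab, sizeof(key));

	/* same shape as the while loop above: two 16-byte chunks, then
	 * one 8-byte tail, regardless of the exact key length
	 */
	while (key_counts) {
		int key_size = key_counts >= HASH_KEY_NUM ? HASH_KEY_NUM
							  : key_counts;

		memcpy(cmd_buf, key + key_offset * HASH_KEY_NUM, key_size);
		printf("chunk %d: %d bytes\n", key_offset, key_size);

		key_counts -= key_size;
		key_offset++;
	}
	return 0;
}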
@@ -3777,8 +3781,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
	return 0;
}

-static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
-				    int vector,
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
@@ -3795,8 +3798,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

-static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
-				       int vector,
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
@@ -3817,8 +3819,7 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
-			vector_id,
-			ret);
+			vector_id, ret);

	return ret;
}
@@ -4122,19 +4123,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
	switch (tuple_bit) {
	case BIT(INNER_DST_MAC):
-		for (i = 0; i < 6; i++) {
-			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
+		for (i = 0; i < ETH_ALEN; i++) {
+			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
-			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
+			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
-		for (i = 0; i < 6; i++) {
-			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
+		for (i = 0; i < ETH_ALEN; i++) {
+			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples.src_mac[i]);
-			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
+			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples.src_mac[i]);
		}
@@ -4170,19 +4171,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
		return true;
	case BIT(INNER_SRC_IP):
-		calc_x(tmp_x_l, rule->tuples.src_ip[3],
-		       rule->tuples_mask.src_ip[3]);
-		calc_y(tmp_y_l, rule->tuples.src_ip[3],
-		       rule->tuples_mask.src_ip[3]);
+		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
+		       rule->tuples_mask.src_ip[IPV4_INDEX]);
+		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
+		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
-		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
-		       rule->tuples_mask.dst_ip[3]);
-		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
-		       rule->tuples_mask.dst_ip[3]);
+		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
+		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
+		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
+		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
@@ -4421,6 +4422,7 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_IP_TOS);

+	/* check whether src/dst ip address used */
	if (!spec->ip6src[0] && !spec->ip6src[1] &&
	    !spec->ip6src[2] && !spec->ip6src[3])
		*unused_tuple |= BIT(INNER_SRC_IP);
@@ -4450,6 +4452,7 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

+	/* check whether src/dst ip address used */
	if (!spec->ip6src[0] && !spec->ip6src[1] &&
	    !spec->ip6src[2] && !spec->ip6src[3])
		*unused_tuple |= BIT(INNER_SRC_IP);
@@ -4645,14 +4648,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
-		rule->tuples.src_ip[3] =
+		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
-		rule->tuples_mask.src_ip[3] =
+		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

-		rule->tuples.dst_ip[3] =
+		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
-		rule->tuples_mask.dst_ip[3] =
+		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
@@ -4671,14 +4674,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
		break;
	case IP_USER_FLOW:
-		rule->tuples.src_ip[3] =
+		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
-		rule->tuples_mask.src_ip[3] =
+		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

-		rule->tuples.dst_ip[3] =
+		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
-		rule->tuples_mask.dst_ip[3] =
+		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
@@ -4695,14 +4698,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
-				  fs->h_u.tcp_ip6_spec.ip6src, 4);
+				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
-				  fs->m_u.tcp_ip6_spec.ip6src, 4);
+				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
-				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
+				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
-				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
+				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
@@ -4718,14 +4721,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev,
		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
-				  fs->h_u.usr_ip6_spec.ip6src, 4);
+				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
-				  fs->m_u.usr_ip6_spec.ip6src, 4);
+				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
-				  fs->h_u.usr_ip6_spec.ip6dst, 4);
+				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
-				  fs->m_u.usr_ip6_spec.ip6dst, 4);
+				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
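Throughout these hunks an IPv4 address is kept in the last 32-bit word of the IPv6-sized tuple field, which is what the new IPV4_INDEX/IPV6_SIZE names spell out. A minimal illustration of that layout, not driver code:

#include <stdio.h>
#include <stdint.h>

#define IPV6_SIZE 4   /* four 32-bit words per address */
#define IPV4_INDEX 3  /* v4 address lives in the last word of the field */

int main(void)
{
	uint32_t src_ip[IPV6_SIZE] = {0};

	src_ip[IPV4_INDEX] = 0xc0a80001; /* 192.168.0.1 */
	printf("v4 word: 0x%08x (words 0..2 stay zero)\n", src_ip[IPV4_INDEX]);
	return 0;
}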
@@ -4898,18 +4901,16 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
-			"Delete fail, rule %d is inexistent\n",
-			fs->location);
+			"Delete fail, rule %d is inexistent\n", fs->location);
		return -ENOENT;
	}

-	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
-				   fs->location, NULL, false);
+	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
+				   NULL, false);
	if (ret)
		return ret;

-	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
-					 false);
+	return hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
}

static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
@@ -4995,13 +4996,13 @@ static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip4_spec *spec,
				     struct ethtool_tcpip4_spec *spec_mask)
{
-	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[3]);
+	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
-			0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[3]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
-			0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
@@ -5020,13 +5021,13 @@ static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip4_spec *spec,
				  struct ethtool_usrip4_spec *spec_mask)
{
-	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[3]);
+	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
-			0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

-	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[3]);
+	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
-			0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -5044,18 +5045,20 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src,
-			  rule->tuples.src_ip, 4);
+			  rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst,
-			  rule->tuples.dst_ip, 4);
+			  rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
-		memset(spec_mask->ip6src, 0, sizeof(int) * 4);
+		memset(spec_mask->ip6src, 0, sizeof(int) * IPV6_SIZE);
	else
-		cpu_to_be32_array(spec->ip6src, rule->tuples_mask.src_ip, 4);
+		cpu_to_be32_array(spec->ip6src, rule->tuples_mask.src_ip,
+				  IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
-		memset(spec_mask->ip6dst, 0, sizeof(int) * 4);
+		memset(spec_mask->ip6dst, 0, sizeof(int) * IPV6_SIZE);
	else
-		cpu_to_be32_array(spec->ip6dst, rule->tuples_mask.dst_ip, 4);
+		cpu_to_be32_array(spec->ip6dst, rule->tuples_mask.dst_ip,
+				  IPV6_SIZE);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
@@ -5070,21 +5073,19 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip6_spec *spec,
				  struct ethtool_usrip6_spec *spec_mask)
{
-	cpu_to_be32_array(spec->ip6src,
-			  rule->tuples.src_ip, 4);
-	cpu_to_be32_array(spec->ip6dst,
-			  rule->tuples.dst_ip, 4);
+	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
-		memset(spec_mask->ip6src, 0, sizeof(int) * 4);
+		memset(spec_mask->ip6src, 0, sizeof(int) * IPV6_SIZE);
	else
		cpu_to_be32_array(spec_mask->ip6src,
-				  rule->tuples_mask.src_ip, 4);
+				  rule->tuples_mask.src_ip, IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
-		memset(spec_mask->ip6dst, 0, sizeof(int) * 4);
+		memset(spec_mask->ip6dst, 0, sizeof(int) * IPV6_SIZE);
	else
		cpu_to_be32_array(spec_mask->ip6dst,
-				  rule->tuples_mask.dst_ip, 4);
+				  rule->tuples_mask.dst_ip, IPV6_SIZE);

	spec->l4_proto = rule->tuples.ip_proto;
	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
@@ -5345,7 +5346,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
#define HCLGE_SERDES_RETRY_NUM 100
#define HCLGE_MAC_LINK_STATUS_MS 10
-#define HCLGE_MAC_LINK_STATUS_NUM 20
+#define HCLGE_MAC_LINK_STATUS_NUM 100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP 1
@@ -5603,11 +5604,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return 0;
-		} else if (resp_code == 2) {
+		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
			return -ENOSPC;
-		} else if (resp_code == 3) {
+		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
			return -ENOSPC;
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{ {
#define HCLGE_VF_NUM_IN_FIRST_DESC 192
int word_num; int word_num;
int bit_num; int bit_num;
if (vfid > 255 || vfid < 0) if (vfid > 255 || vfid < 0)
return -EIO; return -EIO;
if (vfid >= 0 && vfid <= 191) { if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
word_num = vfid / 32; word_num = vfid / 32;
bit_num = vfid % 32; bit_num = vfid % 32;
if (clr) if (clr)
...@@ -5666,7 +5668,7 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) ...@@ -5666,7 +5668,7 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
else else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else { } else {
word_num = (vfid - 192) / 32; word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32; bit_num = vfid % 32;
if (clr) if (clr)
desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
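The vfid-to-descriptor mapping above splits 256 function ids across desc[1] (the first 192) and desc[2] (the rest), 32 ids per data word. A standalone check of the arithmetic, illustrative only:

#include <stdio.h>

#define VF_NUM_IN_FIRST_DESC 192 /* 6 words x 32 bits in desc[1] */

int main(void)
{
	int vfid = 200; /* sample function id */
	int desc_idx, word_num, bit_num;

	if (vfid < VF_NUM_IN_FIRST_DESC) {
		desc_idx = 1;
		word_num = vfid / 32;
	} else {
		desc_idx = 2;
		word_num = (vfid - VF_NUM_IN_FIRST_DESC) / 32;
	}
	bit_num = vfid % 32;

	/* vfid 200 -> desc[2], word 0, bit 8 */
	printf("desc[%d].data[%d] bit %d\n", desc_idx, word_num, bit_num);
	return 0;
}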
@@ -5850,6 +5852,10 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;

+	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
+	 * preserve some unicast mac vlan table entries shared by pf
+	 * and its vfs.
+	 */
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
@@ -5936,9 +5942,12 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;
-		vport->used_umv_num--;
+
+		if (vport->used_umv_num > 0)
+			vport->used_umv_num--;
	} else {
-		if (vport->used_umv_num >= hdev->priv_umv_size)
+		if (vport->used_umv_num >= hdev->priv_umv_size &&
+		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
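The new comment above describes the unicast table split: one private slice per function (each VF plus the PF), with one extra slice and the division remainder kept as a shared pool, hence the divisor of vfs + 2. A quick sketch of the division with sample numbers only:

#include <stdio.h>

int main(void)
{
	unsigned int max_umv_size = 256; /* sample allocation from firmware */
	unsigned int num_req_vfs = 6;

	/* one private slice per vf, one for the pf, and one slice (plus
	 * the remainder) left over as the shared pool
	 */
	unsigned int priv = max_umv_size / (num_req_vfs + 2);
	unsigned int share = priv + max_umv_size % (num_req_vfs + 2);

	printf("priv=%u per function, share=%u\n", priv, share);
	return 0;
}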
@@ -5968,8 +5977,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
-			addr,
-			is_zero_ether_addr(addr),
+			addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
@@ -6033,8 +6041,7 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
-		dev_dbg(&hdev->pdev->dev,
-			"Remove mac err! invalid mac:%pM.\n",
+		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
...@@ -6075,18 +6082,16 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, ...@@ -6075,18 +6082,16 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
memset(&req, 0, sizeof(req)); memset(&req, 0, sizeof(req));
hclge_prepare_mac_addr(&req, addr, true); hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) { if (status) {
/* This mac addr exists, update VFID for it */
hclge_update_desc_vfid(desc, vport->vport_id, false);
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
} else {
/* This mac addr does not exist, add new entry for it */ /* This mac addr does not exist, add new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data)); memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data)); memset(desc[1].data, 0, sizeof(desc[0].data));
memset(desc[2].data, 0, sizeof(desc[0].data)); memset(desc[2].data, 0, sizeof(desc[0].data));
hclge_update_desc_vfid(desc, vport->vport_id, false);
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
} }
status = hclge_update_desc_vfid(desc, vport->vport_id, false);
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
if (status == -ENOSPC) if (status == -ENOSPC)
dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
...@@ -6123,7 +6128,9 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, ...@@ -6123,7 +6128,9 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) { if (!status) {
/* This mac addr exists, remove this handle's VFID for it */ /* This mac addr exists, remove this handle's VFID for it */
hclge_update_desc_vfid(desc, vport->vport_id, true); status = hclge_update_desc_vfid(desc, vport->vport_id, true);
if (status)
return status;
if (hclge_is_all_function_id_zero(desc)) if (hclge_is_all_function_id_zero(desc))
/* All the vfids are zero, so this entry needs to be deleted */ /* All the vfids are zero, so this entry needs to be deleted */
...@@ -6149,7 +6156,6 @@ void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, ...@@ -6149,7 +6156,6 @@ void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
enum HCLGE_MAC_ADDR_TYPE mac_type) enum HCLGE_MAC_ADDR_TYPE mac_type)
{ {
struct hclge_vport_mac_addr_cfg *mac_cfg; struct hclge_vport_mac_addr_cfg *mac_cfg;
struct hclge_dev *hdev = vport->back;
struct list_head *list; struct list_head *list;
if (!vport->vport_id) if (!vport->vport_id)
...@@ -6159,16 +6165,13 @@ void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, ...@@ -6159,16 +6165,13 @@ void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
if (!mac_cfg) if (!mac_cfg)
return; return;
mac_cfg->vport_id = vport->vport_id;
mac_cfg->hd_tbl_status = true; mac_cfg->hd_tbl_status = true;
memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
list = (mac_type == HCLGE_MAC_ADDR_UC) ? list = (mac_type == HCLGE_MAC_ADDR_UC) ?
&hdev->uc_mac_list : &hdev->mc_mac_list; &vport->uc_mac_list : &vport->mc_mac_list;
mutex_lock(&hdev->vport_cfg_mutex);
list_add_tail(&mac_cfg->node, list); list_add_tail(&mac_cfg->node, list);
mutex_unlock(&hdev->vport_cfg_mutex);
} }
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
...@@ -6176,20 +6179,17 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, ...@@ -6176,20 +6179,17 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
enum HCLGE_MAC_ADDR_TYPE mac_type) enum HCLGE_MAC_ADDR_TYPE mac_type)
{ {
struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
struct hclge_dev *hdev = vport->back;
struct list_head *list; struct list_head *list;
bool uc_flag, mc_flag; bool uc_flag, mc_flag;
list = (mac_type == HCLGE_MAC_ADDR_UC) ? list = (mac_type == HCLGE_MAC_ADDR_UC) ?
&hdev->uc_mac_list : &hdev->mc_mac_list; &vport->uc_mac_list : &vport->mc_mac_list;
uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC; uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
mutex_lock(&hdev->vport_cfg_mutex);
list_for_each_entry_safe(mac_cfg, tmp, list, node) { list_for_each_entry_safe(mac_cfg, tmp, list, node) {
if (mac_cfg->vport_id == vport->vport_id && if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
if (uc_flag && mac_cfg->hd_tbl_status) if (uc_flag && mac_cfg->hd_tbl_status)
hclge_rm_uc_addr_common(vport, mac_addr); hclge_rm_uc_addr_common(vport, mac_addr);
...@@ -6201,32 +6201,23 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, ...@@ -6201,32 +6201,23 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
break; break;
} }
} }
mutex_unlock(&hdev->vport_cfg_mutex);
} }
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type) enum HCLGE_MAC_ADDR_TYPE mac_type)
{ {
struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
struct hclge_dev *hdev = vport->back;
struct list_head *list; struct list_head *list;
list = (mac_type == HCLGE_MAC_ADDR_UC) ? list = (mac_type == HCLGE_MAC_ADDR_UC) ?
&hdev->uc_mac_list : &hdev->mc_mac_list; &vport->uc_mac_list : &vport->mc_mac_list;
mutex_lock(&hdev->vport_cfg_mutex);
list_for_each_entry_safe(mac_cfg, tmp, list, node) { list_for_each_entry_safe(mac_cfg, tmp, list, node) {
if (mac_cfg->vport_id == vport->vport_id) { if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
if (mac_type == HCLGE_MAC_ADDR_UC && hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
mac_cfg->hd_tbl_status)
hclge_rm_uc_addr_common(vport,
mac_cfg->mac_addr);
if (mac_type == HCLGE_MAC_ADDR_MC && if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
mac_cfg->hd_tbl_status) hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
hclge_rm_mc_addr_common(vport,
mac_cfg->mac_addr);
mac_cfg->hd_tbl_status = false; mac_cfg->hd_tbl_status = false;
if (is_del_list) { if (is_del_list) {
...@@ -6234,26 +6225,27 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, ...@@ -6234,26 +6225,27 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
kfree(mac_cfg); kfree(mac_cfg);
} }
} }
}
mutex_unlock(&hdev->vport_cfg_mutex);
} }
static void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) static void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{ {
struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; struct hclge_vport_mac_addr_cfg *mac, *tmp;
struct hclge_vport *vport;
int i;
mutex_lock(&hdev->vport_cfg_mutex); mutex_lock(&hdev->vport_cfg_mutex);
list_for_each_entry_safe(mac_cfg, tmp, &hdev->uc_mac_list, node) { for (i = 0; i < hdev->num_alloc_vport; i++) {
list_del(&mac_cfg->node); vport = &hdev->vport[i];
kfree(mac_cfg); list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
list_del(&mac->node);
kfree(mac);
} }
list_for_each_entry_safe(mac_cfg, tmp, &hdev->mc_mac_list, node) { list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
list_del(&mac_cfg->node); list_del(&mac->node);
kfree(mac_cfg); kfree(mac);
}
} }
mutex_unlock(&hdev->vport_cfg_mutex); mutex_unlock(&hdev->vport_cfg_mutex);
} }
...@@ -6368,7 +6360,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, ...@@ -6368,7 +6360,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
return -EINVAL; return -EINVAL;
} }
if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) if ((!is_first || is_kdump_kernel()) &&
hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"remove old uc mac address fail.\n"); "remove old uc mac address fail.\n");
...@@ -6412,7 +6405,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, ...@@ -6412,7 +6405,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
} }
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
u8 fe_type, bool filter_en) u8 fe_type, bool filter_en, u8 vf_id)
{ {
struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc; struct hclge_desc desc;
...@@ -6423,6 +6416,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, ...@@ -6423,6 +6416,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type; req->vlan_type = vlan_type;
req->vlan_fe = filter_en ? fe_type : 0; req->vlan_fe = filter_en ? fe_type : 0;
req->vf_id = vf_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) if (ret)
...@@ -6451,12 +6445,13 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) ...@@ -6451,12 +6445,13 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
if (hdev->pdev->revision >= HNAE3_REVISION_ID_21) { if (hdev->pdev->revision >= HNAE3_REVISION_ID_21) {
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS, enable); HCLGE_FILTER_FE_EGRESS, enable, 0);
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
HCLGE_FILTER_FE_INGRESS, enable); HCLGE_FILTER_FE_INGRESS, enable, 0);
} else { } else {
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS_V1_B, enable); HCLGE_FILTER_FE_EGRESS_V1_B, enable,
0);
} }
if (enable) if (enable)
handle->netdev_flags |= HNAE3_VLAN_FLTR; handle->netdev_flags |= HNAE3_VLAN_FLTR;
...@@ -6551,9 +6546,9 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, ...@@ -6551,9 +6546,9 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
vlan_offset_160 = vlan_id / 160; vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_B;
vlan_offset_byte = (vlan_id % 160) / 8; vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_B) / HCLGE_VLAN_BYTE_SIZE;
vlan_offset_byte_val = 1 << (vlan_id % 8); vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset_160; req->vlan_offset = vlan_offset_160;
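Each PF VLAN filter descriptor covers a window of 160 VLAN IDs kept as a byte bitmap, which is what the new HCLGE_VLAN_ID_B / HCLGE_VLAN_BYTE_SIZE constants name. A quick standalone check of the index math:

#include <stdio.h>

#define VLAN_ID_B	160	/* VLAN IDs covered per descriptor */
#define VLAN_BYTE_SIZE	8	/* bits per bitmap byte */

int main(void)
{
	unsigned int vlan_id = 1000;

	unsigned int offset_160 = vlan_id / VLAN_ID_B;				/* window 6 */
	unsigned int offset_byte = (vlan_id % VLAN_ID_B) / VLAN_BYTE_SIZE;	/* byte 5 */
	unsigned int byte_val = 1u << (vlan_id % VLAN_BYTE_SIZE);		/* bit 0 -> 0x1 */

	printf("window=%u byte=%u mask=0x%x\n", offset_160, offset_byte, byte_val);
	return 0;
}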
...@@ -6813,19 +6808,27 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) ...@@ -6813,19 +6808,27 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
int i; int i;
if (hdev->pdev->revision >= HNAE3_REVISION_ID_21) { if (hdev->pdev->revision >= HNAE3_REVISION_ID_21) {
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, /* for revision 0x21, vf vlan filter is per function */
HCLGE_FILTER_FE_EGRESS, true); for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
ret = hclge_set_vlan_filter_ctrl(hdev,
HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS,
true,
vport->vport_id);
if (ret) if (ret)
return ret; return ret;
}
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
HCLGE_FILTER_FE_INGRESS, true); HCLGE_FILTER_FE_INGRESS, true,
0);
if (ret) if (ret)
return ret; return ret;
} else { } else {
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS_V1_B, HCLGE_FILTER_FE_EGRESS_V1_B,
true); true, 0);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -6944,6 +6947,7 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) ...@@ -6944,6 +6947,7 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport; struct hclge_vport *vport;
int i; int i;
mutex_lock(&hdev->vport_cfg_mutex);
for (i = 0; i < hdev->num_alloc_vport; i++) { for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i]; vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
...@@ -6951,6 +6955,7 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) ...@@ -6951,6 +6955,7 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan); kfree(vlan);
} }
} }
mutex_unlock(&hdev->vport_cfg_mutex);
} }
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
...@@ -7074,7 +7079,11 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, ...@@ -7074,7 +7079,11 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
u16 state; u16 state;
int ret; int ret;
if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) if (hdev->pdev->revision == 0x20)
return -EOPNOTSUPP;
/* qos is a 3-bit value, so it cannot be bigger than 7 */
if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
return -EINVAL; return -EINVAL;
if (proto != htons(ETH_P_8021Q)) if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT; return -EPROTONOSUPPORT;
...@@ -7135,8 +7144,9 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) ...@@ -7135,8 +7144,9 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{ {
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
int i, max_frm_size, ret = 0; int i, max_frm_size, ret;
/* HW supports 2-layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME || if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
max_frm_size > HCLGE_MAC_MAX_FRAME) max_frm_size > HCLGE_MAC_MAX_FRAME)
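The bounds check is against the full on-wire frame rather than the MTU itself. A worked example with a standard 1500-byte MTU (header lengths are the usual Ethernet constants):

#include <stdio.h>

#define ETH_HLEN	14	/* dst mac + src mac + ethertype */
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4

int main(void)
{
	int new_mtu = 1500;
	/* room for two stacked VLAN tags (QinQ), per the comment above */
	int max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	printf("max_frm_size=%d\n", max_frm_size);	/* 1526 */
	return 0;
}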
...@@ -7246,7 +7256,7 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -7246,7 +7256,7 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
int reset_try_times = 0; int reset_try_times = 0;
int reset_status; int reset_status;
u16 queue_gid; u16 queue_gid;
int ret = 0; int ret;
queue_gid = hclge_covert_handle_qid_global(handle, queue_id); queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
...@@ -7263,7 +7273,6 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) ...@@ -7263,7 +7273,6 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
return ret; return ret;
} }
reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */ /* Wait for tqp hw reset */
msleep(20); msleep(20);
...@@ -7302,7 +7311,6 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) ...@@ -7302,7 +7311,6 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
return; return;
} }
reset_try_times = 0;
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
/* Wait for tqp hw reset */ /* Wait for tqp hw reset */
msleep(20); msleep(20);
...@@ -7819,12 +7827,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -7819,12 +7827,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->reset_type = HNAE3_NONE_RESET; hdev->reset_type = HNAE3_NONE_RESET;
hdev->reset_level = HNAE3_FUNC_RESET; hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev; ae_dev->priv = hdev;
/* HW supports 2-layer vlan */
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock); mutex_init(&hdev->vport_lock);
mutex_init(&hdev->vport_cfg_mutex); mutex_init(&hdev->vport_cfg_mutex);
INIT_LIST_HEAD(&hdev->uc_mac_list);
INIT_LIST_HEAD(&hdev->mc_mac_list);
ret = hclge_pci_init(hdev); ret = hclge_pci_init(hdev);
if (ret) { if (ret) {
...@@ -7997,7 +8005,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -7997,7 +8005,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
err_msi_uninit: err_msi_uninit:
pci_free_irq_vectors(pdev); pci_free_irq_vectors(pdev);
err_cmd_uninit: err_cmd_uninit:
hclge_destroy_cmd_queue(&hdev->hw); hclge_cmd_uninit(hdev);
err_pci_uninit: err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.io_base); pcim_iounmap(pdev, hdev->hw.io_base);
pci_clear_master(pdev); pci_clear_master(pdev);
...@@ -8018,7 +8026,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev) ...@@ -8018,7 +8026,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
int i; int i;
for (i = 0; i < hdev->num_alloc_vport; i++) { for (i = 0; i < hdev->num_alloc_vport; i++) {
hclge_vport_start(vport); hclge_vport_stop(vport);
vport++; vport++;
} }
} }
...@@ -8088,8 +8096,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -8088,8 +8096,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_init_fd_config(hdev); ret = hclge_init_fd_config(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
"fd table init fail, ret=%d\n", ret);
return ret; return ret;
} }
...@@ -8135,7 +8142,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -8135,7 +8142,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
synchronize_irq(hdev->misc_vector.vector_irq); synchronize_irq(hdev->misc_vector.vector_irq);
hclge_hw_error_set_state(hdev, false); hclge_hw_error_set_state(hdev, false);
hclge_destroy_cmd_queue(&hdev->hw); hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev); hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev); hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock); mutex_destroy(&hdev->vport_lock);
...@@ -8272,6 +8279,7 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, ...@@ -8272,6 +8279,7 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
void *data) void *data)
{ {
#define HCLGE_32_BIT_REG_RTN_DATANUM 8 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2
struct hclge_desc *desc; struct hclge_desc *desc;
u32 *reg_val = data; u32 *reg_val = data;
...@@ -8283,7 +8291,8 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, ...@@ -8283,7 +8291,8 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
if (regs_num == 0) if (regs_num == 0)
return 0; return 0;
cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); cmd_num = DIV_ROUND_UP(regs_num + HCLGE_32_BIT_DESC_NODATA_LEN,
HCLGE_32_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc) if (!desc)
return -ENOMEM; return -ENOMEM;
...@@ -8300,7 +8309,8 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, ...@@ -8300,7 +8309,8 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
for (i = 0; i < cmd_num; i++) { for (i = 0; i < cmd_num; i++) {
if (i == 0) { if (i == 0) {
desc_data = (__le32 *)(&desc[i].data[0]); desc_data = (__le32 *)(&desc[i].data[0]);
n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; n = HCLGE_32_BIT_REG_RTN_DATANUM -
HCLGE_32_BIT_DESC_NODATA_LEN;
} else { } else {
desc_data = (__le32 *)(&desc[i]); desc_data = (__le32 *)(&desc[i]);
n = HCLGE_32_BIT_REG_RTN_DATANUM; n = HCLGE_32_BIT_REG_RTN_DATANUM;
...@@ -8322,6 +8332,7 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, ...@@ -8322,6 +8332,7 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
void *data) void *data)
{ {
#define HCLGE_64_BIT_REG_RTN_DATANUM 4 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1
struct hclge_desc *desc; struct hclge_desc *desc;
u64 *reg_val = data; u64 *reg_val = data;
...@@ -8333,7 +8344,8 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, ...@@ -8333,7 +8344,8 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
if (regs_num == 0) if (regs_num == 0)
return 0; return 0;
cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); cmd_num = DIV_ROUND_UP(regs_num + HCLGE_64_BIT_DESC_NODATA_LEN,
HCLGE_64_BIT_REG_RTN_DATANUM);
desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc) if (!desc)
return -ENOMEM; return -ENOMEM;
...@@ -8350,7 +8362,8 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, ...@@ -8350,7 +8362,8 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
for (i = 0; i < cmd_num; i++) { for (i = 0; i < cmd_num; i++) {
if (i == 0) { if (i == 0) {
desc_data = (__le64 *)(&desc[i].data[0]); desc_data = (__le64 *)(&desc[i].data[0]);
n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; n = HCLGE_64_BIT_REG_RTN_DATANUM -
HCLGE_64_BIT_DESC_NODATA_LEN;
} else { } else {
desc_data = (__le64 *)(&desc[i]); desc_data = (__le64 *)(&desc[i]);
n = HCLGE_64_BIT_REG_RTN_DATANUM; n = HCLGE_64_BIT_REG_RTN_DATANUM;
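Both register-dump paths size their descriptor arrays the same way: the first descriptor loses a few slots to command metadata (2 of 8 u32 slots for 32-bit registers, 1 of 4 u64 slots for 64-bit ones), so the count rounds up over regs_num plus that overhead — which the new *_DESC_NODATA_LEN constants make explicit. A sketch of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int regs_num = 100;

	/* 32-bit regs: 8 u32 slots per desc, 2 lost to metadata in the first */
	unsigned int cmd_num_32 = DIV_ROUND_UP(regs_num + 2, 8);	/* 13 descs */
	/* 64-bit regs: 4 u64 slots per desc, 1 lost to metadata in the first */
	unsigned int cmd_num_64 = DIV_ROUND_UP(regs_num + 1, 4);	/* 26 descs */

	printf("cmd_num_32=%u cmd_num_64=%u\n", cmd_num_32, cmd_num_64);
	return 0;
}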
...@@ -8410,8 +8423,7 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, ...@@ -8410,8 +8423,7 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
} }
data = (u32 *)data + regs_num_32_bit; data = (u32 *)data + regs_num_32_bit;
ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, data);
data);
if (ret) if (ret)
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Get 64 bit register failed, ret = %d.\n", ret); "Get 64 bit register failed, ret = %d.\n", ret);
......
...@@ -62,6 +62,9 @@ ...@@ -62,6 +62,9 @@
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ #define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
#define HCLGE_VLAN_ID_B 160
#define HCLGE_VLAN_BYTE_SIZE 8
#define HCLGE_TQP_RESET_TRY_TIMES 10 #define HCLGE_TQP_RESET_TRY_TIMES 10
#define HCLGE_PHY_PAGE_MDIX 0 #define HCLGE_PHY_PAGE_MDIX 0
...@@ -190,7 +193,7 @@ struct hclge_mac { ...@@ -190,7 +193,7 @@ struct hclge_mac {
u8 autoneg; u8 autoneg;
u8 duplex; u8 duplex;
u32 speed; u32 speed;
int link; /* store the link status of mac & phy (if phy exists)*/ int link; /* store the link status of mac & phy (if phy exists) */
struct phy_device *phydev; struct phy_device *phydev;
struct mii_bus *mdio_bus; struct mii_bus *mdio_bus;
phy_interface_t phy_if; phy_interface_t phy_if;
...@@ -406,6 +409,7 @@ enum HCLGE_FD_KEY_TYPE { ...@@ -406,6 +409,7 @@ enum HCLGE_FD_KEY_TYPE {
enum HCLGE_FD_STAGE { enum HCLGE_FD_STAGE {
HCLGE_FD_STAGE_1, HCLGE_FD_STAGE_1,
HCLGE_FD_STAGE_2, HCLGE_FD_STAGE_2,
MAX_STAGE_NUM,
}; };
/* OUTER_XXX indicates tuples in tunnel header of tunnel packet /* OUTER_XXX indicates tuples in tunnel header of tunnel packet
...@@ -460,7 +464,7 @@ enum HCLGE_FD_META_DATA { ...@@ -460,7 +464,7 @@ enum HCLGE_FD_META_DATA {
struct key_info { struct key_info {
u8 key_type; u8 key_type;
u8 key_length; u8 key_length; /* use bit as unit */
}; };
static const struct key_info meta_data_key_info[] = { static const struct key_info meta_data_key_info[] = {
...@@ -534,18 +538,23 @@ struct hclge_fd_key_cfg { ...@@ -534,18 +538,23 @@ struct hclge_fd_key_cfg {
struct hclge_fd_cfg { struct hclge_fd_cfg {
u8 fd_mode; u8 fd_mode;
u16 max_key_length; u16 max_key_length; /* use bit as unit */
u32 proto_support; u32 proto_support;
u32 rule_num[2]; /* rule entry number */ u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
u16 cnt_num[2]; /* rule hit counter number */ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
struct hclge_fd_key_cfg key_cfg[2]; struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
}; };
#define IPV4_INDEX 3
#define IPV6_SIZE 4
struct hclge_fd_rule_tuples { struct hclge_fd_rule_tuples {
u8 src_mac[6]; u8 src_mac[ETH_ALEN];
u8 dst_mac[6]; u8 dst_mac[ETH_ALEN];
u32 src_ip[4]; /* Compatible with both ipv4 and ipv6 addresses.
u32 dst_ip[4]; * An ipv4 address is stored in src/dst_ip[3].
*/
u32 src_ip[IPV6_SIZE];
u32 dst_ip[IPV6_SIZE];
u16 src_port; u16 src_port;
u16 dst_port; u16 dst_port;
u16 vlan_tag1; u16 vlan_tag1;
...@@ -581,7 +590,6 @@ struct hclge_fd_ad_data { ...@@ -581,7 +590,6 @@ struct hclge_fd_ad_data {
struct hclge_vport_mac_addr_cfg { struct hclge_vport_mac_addr_cfg {
struct list_head node; struct list_head node;
int vport_id;
int hd_tbl_status; int hd_tbl_status;
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
}; };
...@@ -739,8 +747,6 @@ struct hclge_dev { ...@@ -739,8 +747,6 @@ struct hclge_dev {
struct mutex umv_mutex; /* protect share_umv_size */ struct mutex umv_mutex; /* protect share_umv_size */
struct mutex vport_cfg_mutex; /* Protect stored vf table */ struct mutex vport_cfg_mutex; /* Protect stored vf table */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
}; };
/* VPort level vlan tag configuration for TX direction */ /* VPort level vlan tag configuration for TX direction */
...@@ -780,6 +786,17 @@ enum HCLGE_VPORT_STATE { ...@@ -780,6 +786,17 @@ enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_MAX HCLGE_VPORT_STATE_MAX
}; };
#pragma pack(1)
struct hclge_vf_vlan_cfg {
u8 mbx_cmd;
u8 subcode;
u8 is_kill;
u16 vlan;
u16 proto;
};
#pragma pack()
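The #pragma pack(1) matters because this struct is overlaid directly on the byte-oriented mailbox buffer: mbx_cmd lands on msg[0], subcode on msg[1], is_kill on msg[2], vlan on msg[3..4] and proto on msg[5..6] — the same offsets the old memcpy-based parsing used. A standalone sketch of the overlay (local names; a little-endian host is assumed, as on the target platform):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#pragma pack(1)
struct vf_vlan_cfg {		/* local stand-in for hclge_vf_vlan_cfg */
	uint8_t mbx_cmd;	/* msg[0] */
	uint8_t subcode;	/* msg[1] */
	uint8_t is_kill;	/* msg[2] */
	uint16_t vlan;		/* msg[3..4] */
	uint16_t proto;		/* msg[5..6] */
};
#pragma pack()

int main(void)
{
	uint8_t msg[16] = { 12, 0, 1, 0x64, 0x00, 0x00, 0x81 };
	struct vf_vlan_cfg cfg;

	/* one copy replaces the three field-by-field memcpy calls */
	memcpy(&cfg, msg, sizeof(cfg));
	printf("kill=%d vlan=%d proto=0x%x\n",
	       cfg.is_kill, cfg.vlan, (unsigned int)cfg.proto);
	return 0;
}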
struct hclge_vlan_info { struct hclge_vlan_info {
u16 vlan_proto; /* so far, supports 802.1Q only */ u16 vlan_proto; /* so far, supports 802.1Q only */
u16 qos; u16 qos;
...@@ -822,6 +839,9 @@ struct hclge_vport { ...@@ -822,6 +839,9 @@ struct hclge_vport {
unsigned long state; unsigned long state;
unsigned long last_active_jiffies; unsigned long last_active_jiffies;
int mps; /* Max packet size */ int mps; /* Max packet size */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
}; };
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
......
...@@ -308,34 +308,34 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, ...@@ -308,34 +308,34 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req) struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{ {
struct hclge_vf_vlan_cfg *msg_cmd;
int status = 0; int status = 0;
if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
struct hnae3_handle *handle = &vport->nic; struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto; u16 vlan, proto;
bool is_kill; bool is_kill;
is_kill = !!mbx_req->msg[2]; is_kill = !!msg_cmd->is_kill;
memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); vlan = msg_cmd->vlan;
memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill); vlan, is_kill);
} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic; struct hnae3_handle *handle = &vport->nic;
bool en = mbx_req->msg[2] ? true : false; bool en = msg_cmd->is_kill ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en); status = hclge_en_hw_strip_rxvtag(handle, en);
} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) { } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
struct hclge_vlan_info vlan_info; struct hclge_vlan_info *vlan_info;
u16 state; u16 *state;
memcpy(&state, &mbx_req->msg[2], sizeof(u16)); state = (u16 *)&mbx_req->msg[2];
memcpy(&vlan_info.vlan_tag, &mbx_req->msg[4], sizeof(u16)); vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
memcpy(&vlan_info.qos, &mbx_req->msg[6], sizeof(u16)); status = hclge_update_port_base_vlan_cfg(vport, *state,
memcpy(&vlan_info.vlan_proto, &mbx_req->msg[8], sizeof(u16)); vlan_info);
status = hclge_update_port_base_vlan_cfg(vport, state, } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
&vlan_info);
} else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
u8 state; u8 state;
state = vport->port_base_vlan_cfg.state; state = vport->port_base_vlan_cfg.state;
...@@ -373,7 +373,7 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport, ...@@ -373,7 +373,7 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
vf_tc_map |= BIT(i); vf_tc_map |= BIT(i);
ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map, ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
sizeof(u8)); sizeof(vf_tc_map));
return ret; return ret;
} }
...@@ -410,24 +410,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport, ...@@ -410,24 +410,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
HCLGE_TQPS_DEPTH_INFO_LEN); HCLGE_TQPS_DEPTH_INFO_LEN);
} }
static int hclge_get_vf_media_type(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
struct hclge_dev *hdev = vport->back;
u8 resp_data;
resp_data = hdev->hw.mac.media_type;
return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data,
sizeof(resp_data));
}
static int hclge_get_link_info(struct hclge_vport *vport, static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req) struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{ {
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u16 link_status; u16 link_status;
u8 msg_data[10]; u8 msg_data[8];
u16 media_type;
u8 dest_vfid; u8 dest_vfid;
u16 duplex; u16 duplex;
/* mac.link can only be 0 or 1 */ /* mac.link can only be 0 or 1 */
link_status = (u16)hdev->hw.mac.link; link_status = (u16)hdev->hw.mac.link;
duplex = hdev->hw.mac.duplex; duplex = hdev->hw.mac.duplex;
media_type = hdev->hw.mac.media_type;
memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16)); memcpy(&msg_data[6], &duplex, sizeof(u16));
memcpy(&msg_data[8], &media_type, sizeof(u16));
dest_vfid = mbx_req->mbx_src_vfid; dest_vfid = mbx_req->mbx_src_vfid;
/* send this requested info to VF */ /* send this requested info to VF */
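With media_type moved to its own mailbox query, msg_data shrinks from 10 to 8 bytes: link_status in bytes 0..1, speed in bytes 2..5, duplex in bytes 6..7. A sketch of the packing (field values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t msg_data[8];
	uint16_t link_status = 1;
	uint32_t speed = 25000;	/* illustrative */
	uint16_t duplex = 1;

	memcpy(&msg_data[0], &link_status, sizeof(uint16_t));	/* bytes 0..1 */
	memcpy(&msg_data[2], &speed, sizeof(uint32_t));		/* bytes 2..5 */
	memcpy(&msg_data[6], &duplex, sizeof(uint16_t));	/* bytes 6..7 */

	printf("packed %zu bytes\n", sizeof(msg_data));
	return 0;
}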
...@@ -669,11 +677,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -669,11 +677,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret); ret);
break; break;
case HCLGE_MBX_GET_VF_FLR_STATUS: case HCLGE_MBX_GET_VF_FLR_STATUS:
mutex_lock(&hdev->vport_cfg_mutex);
hclge_rm_vport_all_mac_table(vport, true, hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_UC); HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true, hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_MC); HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(vport, true); hclge_rm_vport_all_vlan_table(vport, true);
mutex_unlock(&hdev->vport_cfg_mutex);
break; break;
case HCLGE_MBX_GET_RSS_KEY: case HCLGE_MBX_GET_RSS_KEY:
ret = hclge_get_rss_key(vport, req); ret = hclge_get_rss_key(vport, req);
...@@ -684,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -684,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_GET_LINK_MODE: case HCLGE_MBX_GET_LINK_MODE:
hclge_get_vf_link_mode(vport, req); hclge_get_vf_link_mode(vport, req);
break; break;
case HCLGE_MBX_GET_MEDIA_TYPE:
ret = hclge_get_vf_media_type(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF fail(%d) to media type for VF\n",
ret);
break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n", "un-supported mailbox message, code = %d\n",
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited. // Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/marvell_phy.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include "kcompat.h" #include "kcompat.h"
#include "hclge_cmd.h" #include "hclge_cmd.h"
...@@ -27,17 +28,6 @@ enum hclge_mdio_c22_op_seq { ...@@ -27,17 +28,6 @@ enum hclge_mdio_c22_op_seq {
#define HCLGE_MDIO_STA_B 0 #define HCLGE_MDIO_STA_B 0
struct hclge_mdio_cfg_cmd {
u8 ctrl_bit;
u8 phyid;
u8 phyad;
u8 rsvd;
__le16 reserve;
__le16 data_wr;
__le16 data_rd;
__le16 sta;
};
static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
u16 data) u16 data)
{ {
...@@ -119,6 +109,13 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) ...@@ -119,6 +109,13 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
return le16_to_cpu(mdio_cmd->data_rd); return le16_to_cpu(mdio_cmd->data_rd);
} }
static int hclge_phy_marvell_fixup(struct phy_device *phydev)
{
phydev->dev_flags |= MARVELL_PHY_M1510_HNS3_LEDS;
return 0;
}
int hclge_mac_mdio_config(struct hclge_dev *hdev) int hclge_mac_mdio_config(struct hclge_dev *hdev)
{ {
struct hclge_mac *mac = &hdev->hw.mac; struct hclge_mac *mac = &hdev->hw.mac;
...@@ -162,6 +159,15 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev) ...@@ -162,6 +159,15 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
mac->phydev = phydev; mac->phydev = phydev;
mac->mdio_bus = mdio_bus; mac->mdio_bus = mdio_bus;
/* register the PHY board fixup (for Marvell 88E1510) */
ret = phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510,
MARVELL_PHY_ID_MASK,
hclge_phy_marvell_fixup);
/* we can live without it, so just issue a warning */
if (ret)
dev_warn(&hdev->pdev->dev,
"Cannot register PHY board fixup\n");
return 0; return 0;
} }
...@@ -195,11 +201,29 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) ...@@ -195,11 +201,29 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
struct net_device *netdev = hdev->vport[0].nic.netdev; struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev; struct phy_device *phydev = hdev->hw.mac.phydev;
#ifdef HAS_LINK_MODE_OPS
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
#endif
int ret; int ret;
if (!phydev) if (!phydev)
return 0; return 0;
#ifdef HAS_LINK_MODE_OPS
linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
if (ret) {
netdev_err(netdev, "phy_connect_direct err.\n");
return ret;
}
linkmode_copy(mask, hdev->hw.mac.supported);
linkmode_and(phydev->supported, phydev->supported, mask);
linkmode_copy(phydev->advertising, phydev->supported);
#else
phydev->supported &= ~SUPPORTED_FIBRE; phydev->supported &= ~SUPPORTED_FIBRE;
ret = phy_connect_direct(netdev, phydev, ret = phy_connect_direct(netdev, phydev,
...@@ -212,7 +236,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) ...@@ -212,7 +236,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
phydev->supported &= *hdev->hw.mac.supported; phydev->supported &= *hdev->hw.mac.supported;
phydev->advertising = phydev->supported; phydev->advertising = phydev->supported;
#endif
return 0; return 0;
} }
...@@ -225,6 +249,9 @@ void hclge_mac_disconnect_phy(struct hnae3_handle *handle) ...@@ -225,6 +249,9 @@ void hclge_mac_disconnect_phy(struct hnae3_handle *handle)
if (!phydev) if (!phydev)
return; return;
phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510,
MARVELL_PHY_ID_MASK);
phy_disconnect(phydev); phy_disconnect(phydev);
} }
......
...@@ -4,6 +4,17 @@ ...@@ -4,6 +4,17 @@
#ifndef __HCLGE_MDIO_H #ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H #define __HCLGE_MDIO_H
struct hclge_mdio_cfg_cmd {
u8 ctrl_bit;
u8 phyid;
u8 phyad;
u8 rsvd;
__le16 reserve;
__le16 data_wr;
__le16 data_rd;
__le16 sta;
};
int hclge_mac_mdio_config(struct hclge_dev *hdev); int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle); int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle); void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
......
...@@ -43,13 +43,17 @@ enum hclge_shaper_level { ...@@ -43,13 +43,17 @@ enum hclge_shaper_level {
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
u8 *ir_b, u8 *ir_u, u8 *ir_s) u8 *ir_b, u8 *ir_u, u8 *ir_s)
{ {
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */ 6 * 256, /* Priority level */
6 * 32, /* Priority group level */ 6 * 32, /* Priority group level */
6 * 8, /* Port level */ 6 * 8, /* Port level */
6 * 256 /* Qset level */ 6 * 256 /* Qset level */
}; };
u8 ir_u_calc = 0, ir_s_calc = 0; u8 ir_u_calc = 0;
u8 ir_s_calc = 0;
u32 ir_calc; u32 ir_calc;
u32 tick; u32 tick;
...@@ -66,7 +70,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -66,7 +70,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000 * ir_calc = ---------------- * 1000
* tick * 1 * tick * 1
*/ */
ir_calc = (1008000 + (tick >> 1) - 1) / tick; ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
if (ir_calc == ir) { if (ir_calc == ir) {
*ir_b = 126; *ir_b = 126;
...@@ -78,27 +82,28 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -78,27 +82,28 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Increasing the denominator to select ir_s value */ /* Increasing the denominator to select ir_s value */
while (ir_calc > ir) { while (ir_calc > ir) {
ir_s_calc++; ir_s_calc++;
ir_calc = 1008000 / (tick * (1 << ir_s_calc)); ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
} }
if (ir_calc == ir) if (ir_calc == ir)
*ir_b = 126; *ir_b = 126;
else else
*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000; *ir_b = (ir * tick * (1 << ir_s_calc) +
(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else { } else {
/* Increasing the numerator to select ir_u value */ /* Increasing the numerator to select ir_u value */
u32 numerator; u32 numerator;
while (ir_calc < ir) { while (ir_calc < ir) {
ir_u_calc++; ir_u_calc++;
numerator = 1008000 * (1 << ir_u_calc); numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick; ir_calc = (numerator + (tick >> 1)) / tick;
} }
if (ir_calc == ir) { if (ir_calc == ir) {
*ir_b = 126; *ir_b = 126;
} else { } else {
u32 denominator = (8000 * (1 << --ir_u_calc)); u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
*ir_b = (ir * tick + (denominator >> 1)) / denominator; *ir_b = (ir * tick + (denominator >> 1)) / denominator;
} }
} }
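The triple produced here encodes the rate roughly as ir_b * DIVISOR_CLK * 2^ir_u / (tick * 2^ir_s); the loops adjust ir_s (or ir_u) until the base rate for ir_b = 126 brackets the request, then round ir_b to the nearest value. A decode sketch for sanity-checking a result (formula inferred from the code above, so treat it as an approximation):

#include <stdio.h>

#define DIVISOR_CLK (1000 * 8)

/* decode an (ir_b, ir_u, ir_s) triple back to a rate for a given tick */
static unsigned int shaper_decode(unsigned int ir_b, unsigned int ir_u,
				  unsigned int ir_s, unsigned int tick)
{
	return ir_b * DIVISOR_CLK * (1u << ir_u) / (tick * (1u << ir_s));
}

int main(void)
{
	/* port level: tick = 6 * 8; requesting ir = 5000 ends the ir_s
	 * search at ir_s = 3 and rounds ir_b to 240
	 */
	printf("rate=%u\n", shaper_decode(240, 0, 3, 6 * 8));	/* 5000 */
	return 0;
}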
...@@ -119,14 +124,13 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev, ...@@ -119,14 +124,13 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT)) opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
return -EINVAL; return -EINVAL;
for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], opcode, true); hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
} }
hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
if (ret) if (ret)
return ret; return ret;
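The rework drops the per-iteration branch: every descriptor except the last sets HCLGE_CMD_FLAG_NEXT to chain the command, so the loop runs to NUM - 1 and the final descriptor is set up afterwards with the flag clear. The pattern in isolation (flag value and types are stand-ins):

#include <stdio.h>

#define CMD_NUM		3
#define FLAG_NEXT	0x0004	/* stand-in for HCLGE_CMD_FLAG_NEXT */

struct desc {
	unsigned short flag;
};

int main(void)
{
	struct desc desc[CMD_NUM] = {{ 0 }};
	int i;

	/* chain every descriptor but the last */
	for (i = 0; i < CMD_NUM - 1; i++)
		desc[i].flag |= FLAG_NEXT;
	/* desc[CMD_NUM - 1] keeps FLAG_NEXT clear and terminates the chain */

	for (i = 0; i < CMD_NUM; i++)
		printf("desc[%d].flag=0x%04x\n", i, (unsigned int)desc[i].flag);
	return 0;
}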
...@@ -219,8 +223,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) ...@@ -219,8 +223,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
trans_gap = pause_param->pause_trans_gap; trans_gap = pause_param->pause_trans_gap;
trans_time = le16_to_cpu(pause_param->pause_trans_time); trans_time = le16_to_cpu(pause_param->pause_trans_time);
return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
trans_time);
} }
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
...@@ -361,14 +364,27 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, ...@@ -361,14 +364,27 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
return hclge_cmd_send(&hdev->hw, &desc, 1); return hclge_cmd_send(&hdev->hw, &desc, 1);
} }
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
u8 bs_b, u8 bs_s)
{
u32 shapping_para = 0;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
return shapping_para;
}
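The new helper centralizes the bit-field packing that each call site previously repeated. A minimal stand-in showing the idea (the field shifts below are assumptions for illustration, not the documented register layout):

#include <stdint.h>
#include <stdio.h>

/* assumed shifts, for illustration only */
#define IR_B_SHIFT	0
#define IR_U_SHIFT	8
#define IR_S_SHIFT	12
#define BS_B_SHIFT	16
#define BS_S_SHIFT	21

static uint32_t get_shapping_para(uint8_t ir_b, uint8_t ir_u, uint8_t ir_s,
				  uint8_t bs_b, uint8_t bs_s)
{
	return ((uint32_t)ir_b << IR_B_SHIFT) |
	       ((uint32_t)ir_u << IR_U_SHIFT) |
	       ((uint32_t)ir_s << IR_S_SHIFT) |
	       ((uint32_t)bs_b << BS_B_SHIFT) |
	       ((uint32_t)bs_s << BS_S_SHIFT);
}

int main(void)
{
	/* zero-rate C bucket with illustrative default burst sizes */
	printf("0x%08x\n", get_shapping_para(0, 0, 0, 5, 20));
	return 0;
}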
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pg_id, enum hclge_shap_bucket bucket, u8 pg_id,
u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s) u32 shapping_para)
{ {
struct hclge_pg_shapping_cmd *shap_cfg_cmd; struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode; enum hclge_opcode_type opcode;
struct hclge_desc desc; struct hclge_desc desc;
u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
HCLGE_OPC_TM_PG_C_SHAPPING; HCLGE_OPC_TM_PG_C_SHAPPING;
...@@ -378,12 +394,6 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, ...@@ -378,12 +394,6 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_id = pg_id; shap_cfg_cmd->pg_id = pg_id;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para); shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1); return hclge_cmd_send(&hdev->hw, &desc, 1);
...@@ -406,11 +416,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) ...@@ -406,11 +416,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
hclge_tm_set_field(shapping_para, IR_B, ir_b); shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
hclge_tm_set_field(shapping_para, IR_U, ir_u); HCLGE_SHAPER_BS_U_DEF,
hclge_tm_set_field(shapping_para, IR_S, ir_s); HCLGE_SHAPER_BS_S_DEF);
hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);
shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
...@@ -419,13 +427,11 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) ...@@ -419,13 +427,11 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pri_id, enum hclge_shap_bucket bucket, u8 pri_id,
u8 ir_b, u8 ir_u, u8 ir_s, u32 shapping_para)
u8 bs_b, u8 bs_s)
{ {
struct hclge_pri_shapping_cmd *shap_cfg_cmd; struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode; enum hclge_opcode_type opcode;
struct hclge_desc desc; struct hclge_desc desc;
u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING : opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
HCLGE_OPC_TM_PRI_C_SHAPPING; HCLGE_OPC_TM_PRI_C_SHAPPING;
...@@ -436,12 +442,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, ...@@ -436,12 +442,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id; shap_cfg_cmd->pri_id = pri_id;
hclge_tm_set_field(shapping_para, IR_B, ir_b);
hclge_tm_set_field(shapping_para, IR_U, ir_u);
hclge_tm_set_field(shapping_para, IR_S, ir_s);
hclge_tm_set_field(shapping_para, BS_B, bs_b);
hclge_tm_set_field(shapping_para, BS_S, bs_s);
shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para); shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1); return hclge_cmd_send(&hdev->hw, &desc, 1);
...@@ -531,6 +531,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -531,6 +531,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
max_rss_size = min_t(u16, hdev->rss_size_max, max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc); vport->alloc_tqps / kinfo->num_tc);
/* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) { kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
...@@ -538,6 +539,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -538,6 +539,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->rss_size = kinfo->req_rss_size; kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size || } else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
/* Set to the maximum specification value (max_rss_size). */
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, max_rss_size); kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size; kinfo->rss_size = max_rss_size;
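The two branches keep distinct policies, per the new comments: honor a changed user request when it fits, otherwise clamp (or grow) to the computed maximum. A condensed sketch of the decision:

#include <stdio.h>

static unsigned int pick_rss_size(unsigned int cur, unsigned int req,
				  unsigned int max)
{
	if (req && req != cur && req <= max)
		return req;	/* honor a valid user request */
	if (cur > max || (!req && cur < max))
		return max;	/* otherwise track the spec value */
	return cur;
}

int main(void)
{
	printf("%u\n", pick_rss_size(16, 8, 32));	/* 8: user value wins */
	printf("%u\n", pick_rss_size(64, 0, 32));	/* 32: clamped to max */
	return 0;
}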
...@@ -604,12 +606,13 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) ...@@ -604,12 +606,13 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
static void hclge_tm_pg_info_init(struct hclge_dev *hdev) static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{ {
#define BW_PERCENT 100
u8 i; u8 i;
for (i = 0; i < hdev->tm_info.num_pg; i++) { for (i = 0; i < hdev->tm_info.num_pg; i++) {
int k; int k;
hdev->tm_info.pg_dwrr[i] = i ? 0 : 100; hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
hdev->tm_info.pg_info[i].pg_id = i; hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
...@@ -621,7 +624,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev) ...@@ -621,7 +624,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++) for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = 100; hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
} }
} }
...@@ -682,6 +685,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) ...@@ -682,6 +685,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{ {
u8 ir_u, ir_b, ir_s; u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret; int ret;
u32 i; u32 i;
...@@ -699,18 +703,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) ...@@ -699,18 +703,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
if (ret) if (ret)
return ret; return ret;
shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev, ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_C_BUCKET, i, HCLGE_TM_SHAP_C_BUCKET, i,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF, shaper_para);
HCLGE_SHAPER_BS_S_DEF);
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pg_shapping_cfg(hdev, shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
HCLGE_TM_SHAP_P_BUCKET, i,
ir_b, ir_u, ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_P_BUCKET, i,
shaper_para);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -730,8 +737,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) ...@@ -730,8 +737,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
/* pg to prio */ /* pg to prio */
for (i = 0; i < hdev->tm_info.num_pg; i++) { for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Cfg dwrr */ /* Cfg dwrr */
ret = hclge_tm_pg_weight_cfg(hdev, i, ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
hdev->tm_info.pg_dwrr[i]);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -811,6 +817,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) ...@@ -811,6 +817,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{ {
u8 ir_u, ir_b, ir_s; u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret; int ret;
u32 i; u32 i;
...@@ -822,17 +829,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) ...@@ -822,17 +829,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pri_shapping_cfg( shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
hdev, HCLGE_TM_SHAP_C_BUCKET, i, HCLGE_SHAPER_BS_U_DEF,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
shaper_para);
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pri_shapping_cfg( shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
hdev, HCLGE_TM_SHAP_P_BUCKET, i, HCLGE_SHAPER_BS_U_DEF,
ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
shaper_para);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -844,6 +853,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) ...@@ -844,6 +853,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{ {
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u8 ir_u, ir_b, ir_s; u8 ir_u, ir_b, ir_s;
u32 shaper_para;
int ret; int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
...@@ -851,18 +861,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) ...@@ -851,18 +861,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
vport->vport_id, HCLGE_SHAPER_BS_U_DEF,
0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
vport->vport_id, shaper_para);
if (ret) if (ret)
return ret; return ret;
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
vport->vport_id,
ir_b, ir_u, ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
vport->vport_id, shaper_para);
if (ret) if (ret)
return ret; return ret;
...@@ -1333,8 +1344,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) ...@@ -1333,8 +1344,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
ret = hclge_pfc_setup_hw(hdev); ret = hclge_pfc_setup_hw(hdev);
if (init && ret == -EOPNOTSUPP) if (init && ret == -EOPNOTSUPP)
dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
else else if (ret) {
dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
ret);
return ret; return ret;
}
return hclge_tm_bp_setup(hdev); return hclge_tm_bp_setup(hdev);
} }
...@@ -1357,7 +1371,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) ...@@ -1357,7 +1371,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{ {
u8 i, bit_map = 0; u8 bit_map = 0;
u8 i;
hdev->tm_info.num_tc = num_tc; hdev->tm_info.num_tc = num_tc;
......
...@@ -81,7 +81,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) ...@@ -81,7 +81,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
if (ring->flag == HCLGEVF_TYPE_CSQ) { if (ring->flag == HCLGEVF_TYPE_CSQ) {
reg_val = (u32)ring->desc_dma_addr; reg_val = (u32)ring->desc_dma_addr;
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); reg_val = (u32)(ring->desc_dma_addr >>
HCLGEVF_RING_BASEADDR_SHIFT);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
...@@ -93,7 +94,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) ...@@ -93,7 +94,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
} else { } else {
reg_val = (u32)ring->desc_dma_addr; reg_val = (u32)ring->desc_dma_addr;
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); reg_val = (u32)(ring->desc_dma_addr >>
HCLGEVF_RING_BASEADDR_SHIFT);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
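The old (addr >> 31) >> 1 was a two-step shift, presumably to stay well-defined when the address type could be 32 bits wide; naming the 64-bit shift makes the intent plain. A sketch of the low/high split (the register writes are mocked by printf):

#include <stdint.h>
#include <stdio.h>

#define RING_BASEADDR_SHIFT 32

int main(void)
{
	uint64_t desc_dma_addr = 0x0000000123456000ULL;	/* illustrative */
	uint32_t lo = (uint32_t)desc_dma_addr;
	uint32_t hi = (uint32_t)(desc_dma_addr >> RING_BASEADDR_SHIFT);

	/* in the driver these go to the BASEADDR_L / BASEADDR_H registers */
	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}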
@@ -363,8 +365,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
 	return 0;
 }

+static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
+{
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+}
+
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
 {
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock_bh(&hdev->hw.cmq.crq.lock);
+	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+	hclgevf_cmd_uninit_regs(&hdev->hw);
+	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
 	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
 }
@@ -242,6 +242,8 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
 #define HCLGEVF_NIC_CMQ_DESC_NUM_S	3
 #define HCLGEVF_NIC_CMDQ_INT_SRC_REG	0x27100

+#define HCLGEVF_RING_BASEADDR_SHIFT	32
+
 static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
 	writel(value, base + reg);
......
@@ -47,8 +47,7 @@ static const u8 hclgevf_hash_key[] = {

 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

-static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
-	struct hnae3_handle *handle)
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
 {
 	if (!handle->client)
 		return container_of(handle, struct hclgevf_dev, nic);
@@ -179,10 +178,8 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
 				u8 *data)
 {
-	u8 *p = (char *)data;
-
 	if (strset == ETH_SS_STATS)
-		p = hclgevf_tqps_get_strings(handle, p);
+		(void)hclgevf_tqps_get_strings(handle, data);
 }

 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
@@ -196,7 +193,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
 	int status;

 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
-				      true, &resp_msg, sizeof(u8));
+				      true, &resp_msg, sizeof(resp_msg));
 	if (status) {
 		dev_err(&hdev->pdev->dev,
 			"VF request to get TC info from PF failed %d",
@@ -285,13 +282,33 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
 	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

 	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
-				   2, true, resp_data, 2);
+				   sizeof(msg_data), true, resp_data,
+				   sizeof(resp_data));
 	if (!ret)
 		qid_in_pf = *(u16 *)resp_data;

 	return qid_in_pf;
 }

+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+	u8 resp_msg;
+	int ret;
+
+	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
+				   true, &resp_msg, sizeof(resp_msg));
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get the pf port media type failed %d",
+			ret);
+		return ret;
+	}
+
+	hdev->hw.mac.media_type = resp_msg;
+
+	return 0;
+}
+
 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
 {
 	struct hclgevf_tqp *tqp;
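Note: several hunks in this file replace literal lengths (2, sizeof(u8)) with sizeof() of the actual buffers. The call shape, as it appears at these call sites (the parameter names below are descriptive guesses, not the declaration):

/* ret = hclgevf_send_mbx_msg(hdev,
 *			      code, subcode,        mailbox opcode pair
 *			      msg_data, msg_len,    request payload
 *			      need_resp,            wait for a PF reply?
 *			      resp_data, resp_len); response buffer
 *
 * Sizing msg_len/resp_len with sizeof(buffer) keeps the lengths in step
 * with the buffer definitions if those definitions ever change.
 */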
@@ -362,7 +379,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
 	u8 resp_msg;

 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
-				      0, false, &resp_msg, sizeof(u8));
+				      0, false, &resp_msg, sizeof(resp_msg));
 	if (status)
 		dev_err(&hdev->pdev->dev,
 			"VF failed to fetch link status(%d) from PF", status);
@@ -397,11 +414,13 @@ void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
 	u8 resp_msg;

 	send_msg = HCLGEVF_ADVERTISING;
-	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
-			     sizeof(u8), false, &resp_msg, sizeof(u8));
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
+			     &send_msg, sizeof(send_msg), false,
+			     &resp_msg, sizeof(resp_msg));
 	send_msg = HCLGEVF_SUPPORTED;
-	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
-			     sizeof(u8), false, &resp_msg, sizeof(u8));
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
+			     &send_msg, sizeof(send_msg), false,
+			     &resp_msg, sizeof(resp_msg));
 }

 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
@@ -489,13 +508,14 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
 {
 	struct hclgevf_rss_config_cmd *req;
 	struct hclgevf_desc desc;
-	int key_offset;
+	int key_offset = 0;
+	int key_counts;
 	int key_size;
 	int ret;

+	key_counts = HCLGEVF_RSS_KEY_SIZE;
 	req = (struct hclgevf_rss_config_cmd *)desc.data;

-	for (key_offset = 0; key_offset < 3; key_offset++) {
+	while (key_counts) {
 		hclgevf_cmd_setup_basic_desc(&desc,
 					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
 					     false);
@@ -504,15 +524,15 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
 		req->hash_config |=
 			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

-		if (key_offset == 2)
-			key_size =
-			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
-		else
+		if (key_counts >= HCLGEVF_RSS_HASH_KEY_NUM)
 			key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+		else
+			key_size = key_counts;

 		memcpy(req->hash_key,
 		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

+		key_counts -= key_size;
+		key_offset++;
 		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 		if (ret) {
 			dev_err(&hdev->pdev->dev,
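Note: the rework drives the loop by the number of key bytes still pending instead of a hard-coded three iterations. Assuming HCLGEVF_RSS_KEY_SIZE is 40 and HCLGEVF_RSS_HASH_KEY_NUM is 16 (both defined in hclgevf.h, outside this patch), the loop copies 16 + 16 + 8 bytes across three descriptors, which is exactly what the old key_offset == 2 special case computed. A standalone sketch under those assumptions:

#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_RSS_KEY_SIZE	40	/* assumed HCLGEVF_RSS_KEY_SIZE */
#define EXAMPLE_HASH_KEY_NUM	16	/* assumed per-descriptor capacity */

static void example_split_rss_key(const u8 *key)
{
	u8 chunk[EXAMPLE_HASH_KEY_NUM];
	int key_counts = EXAMPLE_RSS_KEY_SIZE;
	int key_offset = 0;
	int key_size;

	while (key_counts) {
		/* full chunk while enough bytes remain, remainder at the end */
		key_size = key_counts >= EXAMPLE_HASH_KEY_NUM ?
			   EXAMPLE_HASH_KEY_NUM : key_counts;
		/* each chunk would go out in its own firmware descriptor */
		memcpy(chunk, key + key_offset * EXAMPLE_HASH_KEY_NUM,
		       key_size);
		key_counts -= key_size;
		key_offset++;
	}
}

Counting down the remaining bytes means the loop keeps working if the key size ever stops being an exact multiple-plus-remainder of the chunk size.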
@@ -1137,7 +1157,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
 			       HCLGE_MBX_MAC_VLAN_UC_MODIFY;

 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
-				      subcode, msg_data, ETH_ALEN * 2,
+				      subcode, msg_data, sizeof(msg_data),
 				      true, NULL, 0);
 	if (!status)
 		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1193,7 +1213,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

-	if (vlan_id > 4095)
+	if (vlan_id > MAX_VLAN_ID)
 		return -EINVAL;

 	if (proto != htons(ETH_P_8021Q))
@@ -1227,7 +1247,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
 		return 0;

-	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+	memcpy(msg_data, &queue_id, sizeof(queue_id));

 	/* disable vf queue before send queue reset msg to PF */
 	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
@@ -1235,7 +1255,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 		return ret;

 	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
-				    2, true, NULL, 0);
+				    sizeof(msg_data), true, NULL, 0);
 }

 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -1371,7 +1391,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
 	switch (hdev->reset_type) {
 	case HNAE3_VF_FUNC_RESET:
 		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
-					   0, true, NULL, sizeof(u8));
+					   0, true, NULL, 0);
 		break;
 	case HNAE3_FLR_RESET:
 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
@@ -1493,7 +1513,8 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
 	struct hclgevf_dev *hdev = ae_dev->priv;

-	if (time_before(jiffies, (hdev->last_reset_time + 5 * HZ)))
+	if (time_before(jiffies, (hdev->last_reset_time +
+				  HCLGEVF_RESET_TASK_INTERVAL * HZ)))
 		return;

 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
@@ -1603,8 +1624,10 @@ static void hclgevf_service_timer(struct timer_list *t)
 {
 	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

-	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+	mod_timer(&hdev->service_timer, jiffies +
+		  HCLGEVF_GENERAL_TASK_INTERVAL * HZ);

+	hdev->stats_timer++;
 	hclgevf_task_schedule(hdev);
 }
@@ -1623,8 +1646,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
 				       &hdev->reset_state)) {
 		/* PF has initmated that it is about to reset the hardware.
 		 * We now have to poll & check if hardware has actually
-		 * completed the reset sequence. On hardware reset
-		 * completion, VF needs to reset the client and ae device.
+		 * completed the reset sequence. On hardware reset completion,
+		 * VF needs to reset the client and ae device.
 		 */
 		hdev->reset_attempts = 0;
@@ -1640,7 +1663,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
 				      &hdev->reset_state)) {
 		/* we could be here when either of below happens:
-		 * 1. reset was initiated due to watchdog timeout due to
+		 * 1. reset was initiated due to watchdog timeout caused by
 		 *    a. IMP was earlier reset and our TX got choked down and
 		 *       which resulted in watchdog reacting and inducing VF
 		 *       reset. This also means our cmdq would be unreliable.
@@ -1702,7 +1725,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t)
 	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

 	schedule_work(&hdev->keep_alive_task);
-	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+	mod_timer(&hdev->keep_alive_timer, jiffies +
+		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
 }

 static void hclgevf_keep_alive_task(struct work_struct *work)
@@ -1713,11 +1737,11 @@ static void hclgevf_keep_alive_task(struct work_struct *work)

 	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);

-	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
 		return;

 	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
-				   0, false, &respmsg, sizeof(u8));
+				   0, false, &respmsg, sizeof(respmsg));
 	if (ret)
 		dev_err(&hdev->pdev->dev,
 			"VF sends keep alive cmd failed(=%d)\n", ret);
@@ -1725,9 +1749,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work)

 static void hclgevf_service_task(struct work_struct *work)
 {
+	struct hnae3_handle *handle;
 	struct hclgevf_dev *hdev;

 	hdev = container_of(work, struct hclgevf_dev, service_task);
+	handle = &hdev->nic;
+
+	if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
+		hclgevf_tqps_update_stats(handle);
+		hdev->stats_timer = 0;
+	}

 	/* request the link status from the PF. PF would be able to tell VF
 	 * about such updates in future so we might remove this later
@@ -1831,6 +1862,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 	if (ret)
 		return ret;

+	ret = hclgevf_get_pf_media_type(hdev);
+	if (ret)
+		return ret;
+
 	/* get tc configuration from PF */
 	return hclgevf_get_tc_info(hdev);
 }
@@ -1932,7 +1967,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 			return ret;
 	}

-	/* Initialize RSS indirect table for each vport */
+	/* Initialize RSS indirect table */
 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
@@ -1945,9 +1980,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)

 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 {
-	/* other vlan config(like, VLAN TX/RX offload) would also be added
-	 * here later
-	 */
 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
 				       false);
 }
@@ -1969,7 +2001,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

-	/* reset tqp stats */
 	hclgevf_reset_tqp_stats(handle);

 	hclgevf_request_link_info(hdev);
@@ -1991,7 +2022,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
 	for (i = 0; i < handle->kinfo.num_tqps; i++)
 		hclgevf_reset_tqp(handle, i);

-	/* reset tqp stats */
 	hclgevf_reset_tqp_stats(handle);
 	hclgevf_update_link_status(hdev, 0);
 }
@@ -2010,7 +2040,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

-	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+	mod_timer(&hdev->keep_alive_timer, jiffies +
+		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);

 	return hclgevf_set_alive(handle, true);
 }
......
@@ -12,10 +12,15 @@
 #define HCLGEVF_MOD_VERSION "1.0"
 #define HCLGEVF_DRIVER_NAME "hclgevf"

+#define MAX_VLAN_ID	4095
+
 #define HCLGEVF_MISC_VECTOR_NUM		0

 #define HCLGEVF_INVALID_VPORT		0xffff
+#define HCLGEVF_RESET_TASK_INTERVAL	5
+#define HCLGEVF_GENERAL_TASK_INTERVAL	5
+#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL	2

 /* This number in actual depends upon the total number of VFs
  * created by physical function. But the maximum number of
  * possible vector-per-VF is {VFn(1-32), VECTn(32 + 1)}.
@@ -62,6 +67,8 @@
 #define HCLGEVF_S_IP_BIT		BIT(3)
 #define HCLGEVF_V_TAG_BIT		BIT(4)

+#define HCLGEVF_STATS_TIMER_INTERVAL	(36)
+
 enum hclgevf_evt_cause {
 	HCLGEVF_VECTOR0_EVENT_RST,
 	HCLGEVF_VECTOR0_EVENT_MBX,
@@ -220,6 +227,7 @@ struct hclgevf_dev {
 	struct hnae3_client *nic_client;
 	struct hnae3_client *roce_client;
 	u32 flag;
+	u32 stats_timer;
 };

 static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
......
@@ -273,7 +273,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			link_status = le16_to_cpu(msg_q[1]);
 			memcpy(&speed, &msg_q[2], sizeof(speed));
 			duplex = (u8)le16_to_cpu(msg_q[4]);
-			hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);

 			/* update upper layer with new link link status */
 			hclgevf_update_link_status(hdev, link_status);
......
@@ -378,5 +378,19 @@ static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
 #else
 #endif

+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 4))
+#include <linux/bitmap.h>
+static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
+{
+	__set_bit(nr, addr);
+}
+#else
+#define HAS_LINK_MODE_OPS
+#endif
+
 #endif
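Note: the compat block above backfills linkmode_set_bit() for kernels older than 4.20.4, where the linkmode helpers are not available. A usage sketch (the caller below is illustrative, not part of this patch):

#include <linux/ethtool.h>

static void example_mark_autoneg_supported(void)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };

	/* resolves to the kernel's own helper on >= 4.20.4 and to the
	 * fallback above on older kernels
	 */
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
}

Keeping the call spelling identical on both sides of the version check is what lets the driver source stay free of version ifdefs at the call sites.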