diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 91dbe2a6e056fac2042b893aab689751a9faacc2..bb274397f91cd9bd1c22fb4dc3190d0aa965e69f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -44,6 +44,7 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_LINK_STAT_MODE,	/* (PF -> VF) link mode has changed */
 	HCLGE_MBX_GET_LINK_MODE,	/* (VF -> PF) get the link mode of pf */
 	HLCGE_MBX_PUSH_VLAN_INFO,	/* (PF -> VF) push port base vlan */
+	HCLGE_MBX_GET_MEDIA_TYPE,	/* (VF -> PF) get media type */
 
 	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
 };
@@ -68,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
 };
 
 #define HCLGE_MBX_MAX_MSG_SIZE	16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE	16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8
 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM	3
 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM	3
 
@@ -86,6 +87,7 @@ struct hclge_mbx_vf_to_pf_cmd {
 	u8 rsv1[2];
 	u8 msg_len;
 	u8 rsv2[3];
+	/* msg[0] is the opcode, msg[1] the sub-opcode; the rest is msg data */
 	u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
 };
 
@@ -94,6 +96,7 @@ struct hclge_mbx_pf_to_vf_cmd {
 	u8 rsv[3];
 	u8 msg_len;
 	u8 rsv1[3];
+	/* msg[0] is the opcode; the rest is msg data */
 	u16 msg[8];
 };
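The two comments added above document the mailbox message layout. Below is a minimal standalone sketch (not driver code) of how a VF-to-PF request would be encoded under that layout; the struct is simplified to the fields shown in the hunk, and the helper name and opcode value are illustrative assumptions, not taken from the driver.

#include <stdint.h>
#include <string.h>

#define MBX_MAX_MSG_SIZE	16
#define MBX_GET_MEDIA_TYPE	22	/* assumed value, for illustration only */

struct vf_to_pf_msg {
	uint8_t msg_len;
	uint8_t msg[MBX_MAX_MSG_SIZE];
};

/* msg[0] carries the opcode, msg[1] the sub-opcode, the rest is data */
static void build_get_media_type_req(struct vf_to_pf_msg *req)
{
	memset(req, 0, sizeof(*req));
	req->msg[0] = MBX_GET_MEDIA_TYPE;	/* opcode */
	req->msg_len = 1;			/* no sub-opcode or payload */
}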
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 50011aafbae4dcd9ec6eb673a6704af3a0a6c548..17ab4f4af6ad52259e3d0311ed2c751271579af7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -32,6 +32,9 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
 void hnae3_set_client_init_flag(struct hnae3_client *client,
 				struct hnae3_ae_dev *ae_dev, int inited)
 {
+	if (!client || !ae_dev)
+		return;
+
 	switch (client->type) {
 	case HNAE3_CLIENT_KNIC:
 		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
@@ -109,6 +112,9 @@ int hnae3_register_client(struct hnae3_client *client)
 	struct hnae3_ae_dev *ae_dev;
 	int ret = 0;
 
+	if (!client)
+		return -ENODEV;
+
 	mutex_lock(&hnae3_common_lock);
 	/* one system should only have one client for every type */
 	list_for_each_entry(client_tmp, &hnae3_client_list, node) {
@@ -141,6 +147,9 @@ void hnae3_unregister_client(struct hnae3_client *client)
 {
 	struct hnae3_ae_dev *ae_dev;
 
+	if (!client)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* un-initialize the client on every matched port */
 	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
@@ -163,6 +172,9 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 	struct hnae3_client *client;
 	int ret = 0;
 
+	if (!ae_algo)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 
 	list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);
@@ -173,8 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 		if (!id)
 			continue;
 
-		/* ae_dev init should set flag */
+		if (!ae_algo->ops) {
+			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+			continue;
+		}
 		ae_dev->ops = ae_algo->ops;
+
 		ret = ae_algo->ops->init_ae_dev(ae_dev);
 		if (ret) {
 			dev_err(&ae_dev->pdev->dev,
@@ -182,6 +198,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 			continue;
 		}
 
+		/* ae_dev init should set flag */
 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 
 		/* check the client list for the match with this ae_dev type and
@@ -209,6 +226,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
 	struct hnae3_ae_dev *ae_dev;
 	struct hnae3_client *client;
 
+	if (!ae_algo)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* Check if there are matched ae_dev */
 	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
@@ -245,6 +265,9 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 	struct hnae3_client *client;
 	int ret = 0;
 
+	if (!ae_dev)
+		return -ENODEV;
+
 	mutex_lock(&hnae3_common_lock);
 
 	list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
@@ -255,15 +278,13 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 		if (!id)
 			continue;
 
-		ae_dev->ops = ae_algo->ops;
-
-		if (!ae_dev->ops) {
-			dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+		if (!ae_algo->ops) {
+			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
 			ret = -EOPNOTSUPP;
 			goto out_err;
 		}
+		ae_dev->ops = ae_algo->ops;
 
-		/* ae_dev init should set flag */
 		ret = ae_dev->ops->init_ae_dev(ae_dev);
 		if (ret) {
 			dev_err(&ae_dev->pdev->dev,
@@ -271,6 +292,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 			goto out_err;
 		}
 
+		/* ae_dev init should set flag */
 		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 		break;
 	}
@@ -307,6 +329,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
 	struct hnae3_ae_algo *ae_algo;
 	struct hnae3_client *client;
 
+	if (!ae_dev)
+		return;
+
 	mutex_lock(&hnae3_common_lock);
 	/* Check if there are matched ae_algo */
 	list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
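The hnae3.c hunks above reorder registration: validate the ops table before binding it, and set the INITED flag only after init succeeds. A minimal standalone sketch of that ordering; ae_ops, ae_dev and register_dev are hypothetical names, not driver symbols.

struct ae_ops {
	int (*init_ae_dev)(void *dev);
};

struct ae_dev {
	const struct ae_ops *ops;
	unsigned long flag;
};

#define DEV_INITED_BIT	0x1UL

static int register_dev(struct ae_dev *dev, const struct ae_ops *ops)
{
	if (!ops || !ops->init_ae_dev)	/* reject a null ops table up front */
		return -1;

	dev->ops = ops;			/* bind ops only after validation */
	if (dev->ops->init_ae_dev(dev))
		return -1;		/* flag stays clear on init failure */

	dev->flag |= DEV_INITED_BIT;	/* mark inited only on success */
	return 0;
}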
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index b6fabbbdfd5bb0c0e0f286eecc65f5bac2963762..d2ec4c573bf86ff98f2556da3036e31cd5d7a28b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -4,8 +4,7 @@
 #include "hnae3.h"
 #include "hns3_enet.h"
 
-static
-int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
 	return -EOPNOTSUPP;
 }
 
-static
-int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
 	return -EOPNOTSUPP;
 }
 
-static
-int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
 
@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 	return -EOPNOTSUPP;
 }
 
-static
-int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
 {
 	struct hnae3_handle *h = hns3_get_handle(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 75ffa47e52526a389d074c49be04c1b9d0c2e17c..2e917a2099d4f967d51e8f3383b5e983de578ff9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -146,8 +146,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
 		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
 
 		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
-				  tqp_vectors->name,
-				  tqp_vectors);
+				  tqp_vectors->name, tqp_vectors);
 		if (ret) {
 			netdev_err(priv->netdev, "request irq(%d) fail\n",
 				   tqp_vectors->vector_irq);
@@ -290,8 +289,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 	ret = netif_set_real_num_tx_queues(netdev, queue_size);
 	if (ret) {
 		netdev_err(netdev,
-			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
-			   ret);
+			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
 		return ret;
 	}
 
@@ -347,7 +345,7 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	/* get irq resource for all vectors */
 	ret = hns3_nic_init_irq(priv);
 	if (ret) {
-		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
+		netdev_err(netdev, "init irq failed! ret=%d\n", ret);
 		return ret;
 	}
 
@@ -422,16 +420,13 @@ static int hns3_nic_net_open(struct net_device *netdev)
 	ret = hns3_nic_net_up(netdev);
 	if (ret) {
 		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
-		netdev_err(netdev,
-			   "hns net up fail, ret=%d!\n", ret);
+		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
 		return ret;
 	}
 
 	kinfo = &h->kinfo;
-	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
-		netdev_set_prio_tc_map(netdev, i,
-				       kinfo->prio_tc[i]);
-	}
+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+		netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
 
 	if (h->ae_algo->ops->enable_timer_task)
 		h->ae_algo->ops->enable_timer_task(priv->ae_handle, true);
@@ -635,7 +630,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 	if (l3.v4->version == 4)
 		l3.v4->check = 0;
 
-	/* tunnel packet.*/
+	/* tunnel packet */
 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
 					 SKB_GSO_GRE_CSUM |
 					 SKB_GSO_UDP_TUNNEL |
@@ -665,11 +660,11 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 			l3.v4->check = 0;
 	}
 
-	/* normal or tunnel packet*/
+	/* normal or tunnel packet */
 	l4_offset = l4.hdr - skb->data;
 	hdr_len = (l4.tcp->doff << 2) + l4_offset;
 
-	/* remove payload length from inner pseudo checksum when tso*/
+	/* remove payload length from inner pseudo checksum when tso */
 	l4_paylen = skb->len - l4_offset;
 	csum_replace_by_diff(&l4.tcp->check,
 			     (__force __wsum)htonl(l4_paylen));
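In the hns3_set_tso() context above, csum_replace_by_diff() subtracts the payload length from the TCP pseudo-header checksum before the skb is handed to hardware for segmentation. A standalone sketch of the underlying ones'-complement arithmetic (RFC 1624 style); fold32() and csum_remove_len() are local helpers for illustration, not kernel APIs.

#include <stdint.h>

static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Subtract l4_paylen from the ones'-complement checksum 'check':
 * unfold, add the complement of the folded value, fold, re-complement.
 */
static uint16_t csum_remove_len(uint16_t check, uint32_t l4_paylen)
{
	uint32_t sum = (uint16_t)~check;	/* recover the folded sum */

	sum += (uint16_t)~fold32(l4_paylen);	/* ones'-complement subtract */
	return (uint16_t)~fold32(sum);
}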
@@ -757,7 +752,7 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 	l2_len = l3.hdr - skb->data;
 	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
 
-	/* tunnel packet*/
+	/* tunnel packet */
 	if (skb->encapsulation) {
 		/* compute OL2 header size, defined in 2 Bytes */
 		ol2_len = l2_len;
@@ -769,9 +764,9 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
 			       ol3_len >> 2);
 
-		/* MAC in UDP, MAC in GRE (0x6558)*/
+		/* MAC in UDP, MAC in GRE (0x6558) */
 		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
-			/* switch MAC header ptr from outer to inner header.*/
+			/* switch MAC header ptr from outer to inner header */
 			l2_hdr = skb_inner_mac_header(skb);
 
 			/* compute OL4 header size, defined in 4 Bytes. */
@@ -893,9 +888,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 
 	l3.hdr = skb_network_header(skb);
 
-	/* define OL3 type and tunnel type(OL4).*/
+	/* define OL3 type and tunnel type(OL4) */
 	if (skb->encapsulation) {
-		/* define outer network header type.*/
+		/* define outer network header type */
 		if (skb->protocol == htons(ETH_P_IP)) {
 			if (skb_is_gso(skb))
 				hns3_set_field(*ol_type_vlan_len_msec,
@@ -911,7 +906,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 				       HNS3_OL3T_IPV6);
 		}
 
-		/* define tunnel type(OL4).*/
+		/* define tunnel type(OL4) */
 		switch (l4_proto) {
 		case IPPROTO_UDP:
 			hns3_set_field(*ol_type_vlan_len_msec,
@@ -1081,8 +1076,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		/* Set txbd */
 		desc->tx.ol_type_vlan_len_msec =
 			cpu_to_le32(ol_type_vlan_len_msec);
-		desc->tx.type_cs_vlan_tso_len =
-			cpu_to_le32(type_cs_vlan_tso);
+		desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
 		desc->tx.paylen = cpu_to_le32(paylen);
 		desc->tx.mss = cpu_to_le16(mss);
 		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
@@ -1094,7 +1088,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
 	}
 
-	if (unlikely(dma_mapping_error(ring->dev, dma))) {
+	if (unlikely(dma_mapping_error(dev, dma))) {
 		ring->stats.sw_err_cnt++;
 		return -ENOMEM;
 	}
@@ -1111,19 +1105,19 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc_cb->priv = priv;
 		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
 		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
-				DESC_TYPE_SKB : DESC_TYPE_PAGE;
+				 DESC_TYPE_SKB : DESC_TYPE_PAGE;
 
 		/* now, fill the descriptor */
 		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
 		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
-				(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+				     (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
 		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
 				       frag_end && (k == frag_buf_num - 1) ?
				       1 : 0);
 		desc->tx.bdtp_fe_sc_vld_ra_ri =
 			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
 
-		/* move ring pointer to next.*/
+		/* move ring pointer to next */
 		ring_ptr_move_fw(ring, next_to_use);
 
 		desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -1644,7 +1638,7 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
 
 	if (h->ae_algo->ops->set_vf_vlan_filter)
 		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
-							qos, vlan_proto);
+							  qos, vlan_proto);
 
 	return ret;
 }
@@ -1673,7 +1667,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
-	struct hns3_enet_ring *tx_ring = NULL;
+	struct hns3_enet_ring *tx_ring;
 	int timeout_queue = 0;
 	int hw_head, hw_tail;
 	int i;
@@ -1816,8 +1810,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct hnae3_ae_dev *ae_dev;
 	int ret;
 
-	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
-			      GFP_KERNEL);
+	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
 	if (!ae_dev) {
 		ret = -ENOMEM;
 		return ret;
@@ -2017,7 +2010,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
 
 	if (pdev->revision > HNAE3_REVISION_ID_20) {
-		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 #ifdef NETIF_F_GRO_HW
 		netdev->features |= NETIF_F_GRO_HW;
 		netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2125,8 +2117,7 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 	int size = ring->desc_num * sizeof(ring->desc[0]);
 
 	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
-					 &ring->desc_dma_addr,
-					 GFP_KERNEL);
+					 &ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -2185,7 +2176,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
 	return ret;
 }
 
-/* detach a in-used buffer and replace with a reserved one  */
+/* detach an in-use buffer and replace it with a reserved one */
 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 				struct hns3_desc_cb *res_cb)
 {
@@ -2198,8 +2189,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
 	ring->desc_cb[i].reuse_flag = 0;
-	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
-		+ ring->desc_cb[i].page_offset);
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;
 }
 
@@ -2210,7 +2201,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
 	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
 	(*bytes) += desc_cb->length;
 
-	/* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
+	/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
 	hns3_free_buffer_detach(ring, ring->next_to_clean);
 
 	ring_ptr_move_fw(ring, next_to_clean);
@@ -2292,8 +2283,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }
 
-static void
-hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
+static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+				      int cleand_count)
 {
 	struct hns3_desc_cb *desc_cb;
 	struct hns3_desc_cb res_cbs;
@@ -2332,48 +2323,30 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
 {
-	struct hns3_desc *desc;
-	int truesize, size;
-	int last_offset;
-	bool twobufs;
-
-	twobufs = ((PAGE_SIZE < 8192) &&
-		hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
-
-	desc = &ring->desc[ring->next_to_clean];
-	size = le16_to_cpu(desc->rx.size);
-
-	truesize = hnae3_buf_size(ring);
-
-	if (!twobufs)
-		last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
+	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+	int size = le16_to_cpu(desc->rx.size);
+	u32 truesize = hnae3_buf_size(ring);
 
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);
 
-	/* Avoid re-using remote pages,flag default unreuse */
-	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
+	/* Avoid re-using remote pages, or pages the stack is still using
+	 * after the page buffer has wrapped back; flag default unreuse.
+	 */
+	if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
+	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
 		return;
 
-	if (twobufs) {
-		/* If we are only owner of page we can reuse it */
-		if (likely(page_count(desc_cb->priv) == 1)) {
-			/* Flip page offset to other buffer */
-			desc_cb->page_offset ^= truesize;
-
-			desc_cb->reuse_flag = 1;
-			/* bump ref count on page before it is given*/
-			get_page(desc_cb->priv);
-		}
-		return;
-	}
-
 	/* Move offset up to the next cache line */
 	desc_cb->page_offset += truesize;
 
-	if (desc_cb->page_offset <= last_offset) {
+	if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
 		desc_cb->reuse_flag = 1;
-		/* Bump ref count on page before it is given*/
+		/* Bump ref count on page before it is given */
+		get_page(desc_cb->priv);
+	} else if (page_count(desc_cb->priv) == 1) {
+		desc_cb->reuse_flag = 1;
+		desc_cb->page_offset = 0;
 		get_page(desc_cb->priv);
 	}
 }
@@ -2525,7 +2498,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
 	memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
 
 	/* We can reuse buffer as-is, just make sure it is local */
-	if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+	if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
 		desc_cb->reuse_flag = 1;
 	else /* This page cannot be reused so discard it */
 		put_page(desc_cb->priv);
@@ -2559,7 +2532,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
 
 	if (pending) {
 		ntc = (ring->next_to_clean - 1 + ring->desc_num) %
-			ring->desc_num;
+		      ring->desc_num;
 		pre_desc = &ring->desc[ntc];
 		bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
 	} else {
@@ -2648,8 +2621,7 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
 	 */
 	NAPI_GRO_CB(skb)->count = gro_count;
 
-	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
-				  HNS3_RXD_L3ID_S);
+	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
 	if (l3_type == HNS3_L3_TYPE_IPV4)
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 	else if (l3_type == HNS3_L3_TYPE_IPV6)
@@ -2795,9 +2767,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 	return 0;
 }
 
-int hns3_clean_rx_ring(
-		struct hns3_enet_ring *ring, int budget,
-		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
+		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
@@ -2851,8 +2822,7 @@ int hns3_clean_rx_ring(
 out:
 	/* Make all data has been write before submit */
 	if (clean_count + unused_count > 0)
-		hns3_nic_alloc_rx_buffers(ring,
-					  clean_count + unused_count);
+		hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
 
 	return recv_pkts;
 }
@@ -3309,10 +3279,8 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
 
 		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
-			irq_set_affinity_notifier(tqp_vector->vector_irq,
-						  NULL);
-			irq_set_affinity_hint(tqp_vector->vector_irq,
-					      NULL);
+			irq_set_affinity_notifier(tqp_vector->vector_irq, NULL);
+			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
 			free_irq(priv->tqp_vector[i].vector_irq,
 				 &priv->tqp_vector[i]);
 			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3453,8 +3421,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 	if (ring->desc_num <= 0 || ring->buf_size <= 0)
 		return -EINVAL;
 
-	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
-				GFP_KERNEL);
+	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
+				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
 	if (!ring->desc_cb) {
 		ret = -ENOMEM;
 		goto out;
@@ -3475,7 +3443,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 out_with_desc:
 	hns3_free_desc(ring);
 out_with_desc_cb:
-	kfree(ring->desc_cb);
+	devm_kfree(ring_to_dev(ring), ring->desc_cb);
 	ring->desc_cb = NULL;
 out:
 	return ret;
@@ -3484,7 +3452,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 static void hns3_fini_ring(struct hns3_enet_ring *ring)
 {
 	hns3_free_desc(ring);
-	kfree(ring->desc_cb);
+	devm_kfree(ring_to_dev(ring), ring->desc_cb);
 	ring->desc_cb = NULL;
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
@@ -3525,8 +3493,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
 	struct hnae3_queue *q = ring->tqp;
 
 	if (!HNAE3_IS_TX_RING(ring)) {
-		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
-			       (u32)dma);
+		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
 		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
 			       (u32)((dma >> 31) >> 1));
 
@@ -3773,7 +3740,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	ret = hns3_client_start(handle);
 	if (ret) {
 		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
ret=%d\n", ret); - goto out_reg_netdev_fail; + goto out_client_start; } hns3_dcbnl_setup(handle); @@ -3789,6 +3756,8 @@ static int hns3_client_init(struct hnae3_handle *handle) return ret; +out_client_start: + unregister_netdev(netdev); out_reg_netdev_fail: hns3_uninit_phy(netdev); out_init_phy: @@ -3958,8 +3927,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) ret); return ret; } - hns3_replace_buffer(ring, ring->next_to_use, - &res_cbs); + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); } ring_ptr_move_fw(ring, next_to_use); } @@ -4130,7 +4098,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) ret = hns3_nic_net_open(kinfo->netdev); if (ret) { netdev_err(kinfo->netdev, - "hns net up fail, ret=%d!\n", ret); + "net up fail, ret=%d!\n", ret); set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); return ret; } @@ -4171,6 +4139,12 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) if (ret) goto err_uninit_vector; + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); + goto err_uninit_vector; + } + set_bit(HNS3_NIC_STATE_INITED, &priv->state); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 79ab2b6afd5b7399bd9fc45b6de89c7ba01b1a4d..099f1eb939887cc919ebd1cd6ee563422df157a7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -398,7 +398,6 @@ struct hns3_enet_ring { struct hns3_enet_ring *next; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_queue *tqp; - char ring_name[HNS3_RING_NAME_LEN]; struct device *dev; /* will be used for DMA mapping of descriptors */ /* statistic */ @@ -408,9 +407,6 @@ struct hns3_enet_ring { dma_addr_t desc_dma_addr; u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u16 desc_num; /* total number of desc */ - u16 max_desc_num_per_pkt; - u16 max_raw_data_sz_per_desc; - u16 max_pkt_size; int next_to_use; /* idx of next spare desc */ /* idx of lastest sent desc, the ring is empty when equal to @@ -424,9 +420,6 @@ struct hns3_enet_ring { u32 flag; /* ring attribute */ - int numa_node; - cpumask_t affinity_mask; - int pending_buf; struct sk_buff *skb; struct sk_buff *tail_skb; @@ -634,7 +627,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) -#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) +#define ring_to_dev(ring) ((ring)->dev) #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 79ab2b6afd5b7399bd9fc45b6de89c7ba01b1a4d..099f1eb939887cc919ebd1cd6ee563422df157a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -398,7 +398,6 @@ struct hns3_enet_ring {
 	struct hns3_enet_ring *next;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	struct hnae3_queue *tqp;
-	char ring_name[HNS3_RING_NAME_LEN];
 	struct device *dev; /* will be used for DMA mapping of descriptors */
 
 	/* statistic */
@@ -408,9 +407,6 @@ struct hns3_enet_ring {
 	dma_addr_t desc_dma_addr;
 	u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
 	u16 desc_num;       /* total number of desc */
-	u16 max_desc_num_per_pkt;
-	u16 max_raw_data_sz_per_desc;
-	u16 max_pkt_size;
 	int next_to_use;    /* idx of next spare desc */
 
 	/* idx of lastest sent desc, the ring is empty when equal to
@@ -424,9 +420,6 @@ struct hns3_enet_ring {
 
 	u32 flag;          /* ring attribute */
 
-	int numa_node;
-	cpumask_t affinity_mask;
-
 	int pending_buf;
 	struct sk_buff *skb;
 	struct sk_buff *tail_skb;
@@ -634,7 +627,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 #define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
 		(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
 
-#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
+#define ring_to_dev(ring) ((ring)->dev)
 
 #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
 	DMA_TO_DEVICE : DMA_FROM_DEVICE)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 620cb491843a6a5e2b0cf24b08ca6c99ae73b3e6..3e534317fd6592e6fcbe609ff924093508ea20bc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -58,6 +58,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
 #define HNS3_NIC_LB_TEST_PKT_NUM	1
 #define HNS3_NIC_LB_TEST_RING_ID	0
 #define HNS3_NIC_LB_TEST_PACKET_SIZE	128
+#define HNS3_NIC_LB_SETUP_USEC		10000
 
 /* Nic loopback test err */
 #define HNS3_NIC_LB_TEST_NO_MEM_ERR	1
@@ -115,7 +116,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
 		return ret;
 
 	ret = hns3_lp_setup(ndev, loop_mode, true);
-	usleep_range(10000, 20000);
+	usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
 
 	return ret;
 }
@@ -130,7 +131,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
 		return ret;
 	}
 
-	usleep_range(10000, 20000);
+	usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
 
 	return 0;
 }
@@ -152,6 +153,12 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
 	packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
 
 	memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+
+	/* The dst mac addr of the loopback packet is the same as the host
+	 * mac addr; the SSU component may loop the packet back to the host
+	 * before it reaches the mac or serdes, which would defeat the
+	 * purpose of the mac or serdes selftest.
+	 */
 	ethh->h_dest[5] += 0x1f;
 	eth_zero_addr(ethh->h_source);
 	ethh->h_proto = htons(ETH_P_ARP);
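The comment added to hns3_lp_setup_skb() explains why the self-test frame cannot carry the host's exact MAC as destination. A standalone sketch of that destination-MAC perturbation; build_lb_dest_mac is an illustrative helper, not a driver function:

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static void build_lb_dest_mac(uint8_t dst[ETH_ALEN],
			      const uint8_t host_mac[ETH_ALEN])
{
	memcpy(dst, host_mac, ETH_ALEN);
	dst[5] += 0x1f;	/* no longer matches the host MAC, so the internal
			 * switch forwards it out toward mac/serdes instead
			 * of looping it straight back to the host
			 */
}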
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index c8baf58a19fd8e65c0ec47a49ce5d90fbfb73c46..f26aeea86081fb467b697a721439b9d8a8ceb312 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -209,12 +209,14 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw,
 			retval = -EPERM;
 		else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
 			retval = -EOPNOTSUPP;
+		else if (desc_ret == HCLGE_CMD_QUEUE_ILLEGAL)
+			retval = -ENXIO;
 		else
 			retval = -EIO;
 		hw->cmq.last_status = desc_ret;
 		(*ntc)++;
 		handle++;
-		if (*ntc == hw->cmq.csq.desc_num)
+		if (*ntc >= hw->cmq.csq.desc_num)
 			*ntc = 0;
 	}
 	return retval;
@@ -257,7 +259,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
 		*desc_to_use = desc[handle];
 		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
 			hw->cmq.csq.next_to_use = 0;
 		handle++;
 	}
@@ -393,6 +395,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
 	return 0;
 }
 
+static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
+{
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
+}
+
 static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
 {
 	spin_lock(&ring->lock);
@@ -405,3 +421,15 @@ void hclge_destroy_cmd_queue(struct hclge_hw *hw)
 	hclge_destroy_queue(&hw->cmq.csq);
 	hclge_destroy_queue(&hw->cmq.crq);
 }
+
+void hclge_cmd_uninit(struct hclge_dev *hdev)
+{
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock_bh(&hdev->hw.cmq.crq.lock);
+	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+	hclge_cmd_uninit_regs(&hdev->hw);
+	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+	hclge_destroy_cmd_queue(&hdev->hw);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 59245fa0bd4530798ad1dc003443c770ee38b9f1..ae480788a2218ae93c137eeea80b2d403bef7b2c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -41,6 +41,7 @@ enum hclge_cmd_return_status {
 	HCLGE_CMD_NO_AUTH	= 1,
 	HCLGE_CMD_NOT_SUPPORTED	= 2,
 	HCLGE_CMD_QUEUE_FULL	= 3,
+	HCLGE_CMD_QUEUE_ILLEGAL	= 10,
 };
 
 enum hclge_cmd_status {
@@ -318,16 +319,16 @@ struct hclge_ctrl_vector_chain_cmd {
 	u8 rsv;
 };
 
-#define HCLGE_TC_NUM		8
+#define HCLGE_MAX_TC_NUM	8
 #define HCLGE_TC0_PRI_BUF_EN_B	15 /* Bit 15 indicate enable or not */
 #define HCLGE_BUF_UNIT_S	7  /* Buf size is united by 128 bytes */
 struct hclge_tx_buff_alloc_cmd {
-	__le16 tx_pkt_buff[HCLGE_TC_NUM];
+	__le16 tx_pkt_buff[HCLGE_MAX_TC_NUM];
 	u8 tx_buff_rsv[8];
 };
 
 struct hclge_rx_priv_buff_cmd {
-	__le16 buf_num[HCLGE_TC_NUM];
+	__le16 buf_num[HCLGE_MAX_TC_NUM];
 	__le16 shared_buf;
 	u8 rsv[6];
 };
@@ -373,7 +374,6 @@ struct hclge_priv_buf {
 	u32 enable;	/* Enable TC private buffer or not */
 };
 
-#define HCLGE_MAX_TC_NUM	8
 struct hclge_shared_buf {
 	struct hclge_waterline self;
 	struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM];
@@ -620,6 +620,11 @@ enum hclge_mac_vlan_tbl_opcode {
 	HCLGE_MAC_VLAN_LKUP,    /* Lookup a entry through mac_vlan key */
 };
 
+enum hclge_mac_vlan_add_resp_code {
+	HCLGE_ADD_UC_OVERFLOW = 2,	/* ADD failed for UC overflow */
+	HCLGE_ADD_MC_OVERFLOW,		/* ADD failed for MC overflow */
+};
+
 #define HCLGE_MAC_VLAN_BIT0_EN_B	0
 #define HCLGE_MAC_VLAN_BIT1_EN_B	1
 #define HCLGE_MAC_EPORT_SW_EN_B		12
@@ -732,7 +737,9 @@ struct hclge_mac_ethertype_idx_rd_cmd {
 struct hclge_vlan_filter_ctrl_cmd {
 	u8 vlan_type;
 	u8 vlan_fe;
-	u8 rsv[22];
+	u8 rsv1[2];
+	u8 vf_id;
+	u8 rsv2[19];
 };
 
 struct hclge_vlan_filter_pf_cfg_cmd {
@@ -996,6 +1003,6 @@ enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
 enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
 					  struct hclge_desc *desc);
 
-void hclge_destroy_cmd_queue(struct hclge_hw *hw);
+void hclge_cmd_uninit(struct hclge_dev *hdev);
 int hclge_cmd_queue_init(struct hclge_dev *hdev);
 #endif
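hclge_cmd_check_retval() maps firmware return codes to errnos, and this patch adds the HCLGE_CMD_QUEUE_ILLEGAL -> -ENXIO case. A standalone sketch of that mapping; the enum names are renamed stand-ins, the values mirror the hclge_cmd_return_status entries shown above, and the success value of 0 is an assumption:

#include <errno.h>

enum fw_ret {
	FW_EXEC_SUCCESS	 = 0,	/* assumed, by analogy with the others */
	FW_NO_AUTH	 = 1,
	FW_NOT_SUPPORTED = 2,
	FW_QUEUE_FULL	 = 3,
	FW_QUEUE_ILLEGAL = 10,
};

static int fw_ret_to_errno(enum fw_ret ret)
{
	switch (ret) {
	case FW_EXEC_SUCCESS:	return 0;
	case FW_NO_AUTH:	return -EPERM;
	case FW_NOT_SUPPORTED:	return -EOPNOTSUPP;
	case FW_QUEUE_ILLEGAL:	return -ENXIO;	/* new in this patch */
	default:		return -EIO;	/* queue-full and the rest */
	}
}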
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 5df3743d80cf5da94ce61a4db9e0de8cffbf21a4..7a77696f29ea26a514eec81730c8e1fae99e8c57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -42,6 +42,8 @@ static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
 {
 	struct hclge_desc desc[4];
+	int entries_per_desc;
+	int index;
 	int ret;
 
 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
@@ -59,7 +61,9 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
 		return ret;
 	}
 
-	return (int)desc[offset / 6].data[offset % 6];
+	entries_per_desc = ARRAY_SIZE(desc[0].data);
+	index = offset % entries_per_desc;
+	return (int)desc[offset / entries_per_desc].data[index];
 }
 
 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -96,12 +100,13 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
 {
 	struct hclge_desc *desc_src;
 	struct hclge_desc *desc;
+	int entries_per_desc;
 	int bd_num, buf_len;
 	int ret, i;
 	int index;
 	int max;
 
-	ret = kstrtouint(cmd_buf, 10, &index);
+	ret = kstrtouint(cmd_buf, 0, &index);
 	index = (ret != 0) ? 0 : index;
 
 	bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
@@ -125,14 +130,18 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
 		return;
 	}
 
-	max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num;
+	entries_per_desc = ARRAY_SIZE(desc->data);
+
+	max = (bd_num * entries_per_desc) <= msg_num ?
+	      (bd_num * entries_per_desc) : msg_num;
 
 	desc = desc_src;
 	for (i = 0; i < max; i++) {
-		(((i / 6) > 0) && ((i % 6) == 0)) ? desc++ : desc;
+		((i > 0) && ((i % entries_per_desc) == 0)) ? desc++ : desc;
 		if (dfx_message->flag)
 			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
-				 dfx_message->message, desc->data[i % 6]);
+				 dfx_message->message,
+				 desc->data[i % entries_per_desc]);
 
 		dfx_message++;
 	}
@@ -244,92 +253,92 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
 {
 	int msg_num;
 
-	if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
+	if (strncmp(cmd_buf, "bios common", 11) == 0) {
 		msg_num = sizeof(hclge_dbg_bios_common_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
-					  &cmd_buf[21], msg_num,
-					  HCLGE_DBG_DFX_BIOS_OFFSET,
+					  &cmd_buf[sizeof("bios common")],
+					  msg_num, HCLGE_DBG_DFX_BIOS_OFFSET,
 					  HCLGE_OPC_DFX_BIOS_COMMON_REG);
-	} else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
+	} else if (strncmp(cmd_buf, "ssu", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_ssu_reg_0) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_0_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_0);
 
 		msg_num = sizeof(hclge_dbg_ssu_reg_1) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_1_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_1);
 
 		msg_num = sizeof(hclge_dbg_ssu_reg_2) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ssu")], msg_num,
 					  HCLGE_DBG_DFX_SSU_2_OFFSET,
 					  HCLGE_OPC_DFX_SSU_REG_2);
-	} else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
+	} else if (strncmp(cmd_buf, "igu egu", 7) == 0) {
 		msg_num = sizeof(hclge_dbg_igu_egu_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
-					  &cmd_buf[17], msg_num,
+					  &cmd_buf[sizeof("igu egu")], msg_num,
 					  HCLGE_DBG_DFX_IGU_OFFSET,
 					  HCLGE_OPC_DFX_IGU_EGU_REG);
-	} else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rpu", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rpu_reg_0) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rpu")], msg_num,
 					  HCLGE_DBG_DFX_RPU_0_OFFSET,
 					  HCLGE_OPC_DFX_RPU_REG_0);
 
 		msg_num = sizeof(hclge_dbg_rpu_reg_1) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rpu")], msg_num,
 					  HCLGE_DBG_DFX_RPU_1_OFFSET,
 					  HCLGE_OPC_DFX_RPU_REG_1);
-	} else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
+	} else if (strncmp(cmd_buf, "ncsi", 4) == 0) {
 		msg_num = sizeof(hclge_dbg_ncsi_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
-					  &cmd_buf[14], msg_num,
+					  &cmd_buf[sizeof("ncsi")], msg_num,
 					  HCLGE_DBG_DFX_NCSI_OFFSET,
 					  HCLGE_OPC_DFX_NCSI_REG);
-	} else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rtc", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rtc_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rtc")], msg_num,
 					  HCLGE_DBG_DFX_RTC_OFFSET,
 					  HCLGE_OPC_DFX_RTC_REG);
-	} else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
+	} else if (strncmp(cmd_buf, "ppp", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_ppp_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("ppp")], msg_num,
 					  HCLGE_DBG_DFX_PPP_OFFSET,
 					  HCLGE_OPC_DFX_PPP_REG);
-	} else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
+	} else if (strncmp(cmd_buf, "rcb", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_rcb_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("rcb")], msg_num,
 					  HCLGE_DBG_DFX_RCB_OFFSET,
 					  HCLGE_OPC_DFX_RCB_REG);
-	} else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
+	} else if (strncmp(cmd_buf, "tqp", 3) == 0) {
 		msg_num = sizeof(hclge_dbg_tqp_reg) /
 			  sizeof(struct hclge_dbg_dfx_message);
 		hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
-					  &cmd_buf[13], msg_num,
+					  &cmd_buf[sizeof("tqp")], msg_num,
 					  HCLGE_DBG_DFX_TQP_OFFSET,
 					  HCLGE_OPC_DFX_TQP_REG);
-	} else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
-		hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
+	} else if (strncmp(cmd_buf, "dcb", 3) == 0) {
+		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
 	} else {
 		dev_info(&hdev->pdev->dev, "unknown command\n");
 		return;
@@ -601,7 +610,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
 	int pri_id, ret;
 	u32 i;
 
-	ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
+	ret = kstrtouint(cmd_buf, 0, &queue_id);
 	queue_id = (ret != 0) ? 0 : queue_id;
 
 	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
@@ -772,7 +781,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
 
 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
-	for (i = 0; i < HCLGE_TC_NUM; i++)
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
 			 tx_buf_cmd->tx_pkt_buff[i]);
 
@@ -784,7 +793,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "\n");
 
 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
-	for (i = 0; i < HCLGE_TC_NUM; i++)
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
 			 rx_buf_cmd->buf_num[i]);
 
@@ -893,8 +902,8 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
 	mc_tbl_idx = 0;
 	for (i = 0; i < HCLGE_DBG_MAC_TBL_MAX; i++) {
 		/* Prevent long-term occupation of the command channel. */
-		if ((i % 100) == 0)
-			msleep(100);
+		if ((i % HCLGE_DBG_SCAN_STEP) == 0)
+			msleep(HCLGE_DBG_PAUSE_TIME);
 
 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_PPP_MAC_VLAN_IDX_RD,
 					   true);
@@ -919,6 +928,19 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
 		if (mac_rd_cmd->resp_code)
 			continue;
 
+		if (mac_rd_cmd->entry_type == HCLGE_DBG_MAC_MC_TBL) {
+			mc_mac_tbl[mc_tbl_idx].index = i;
+			memcpy(mc_mac_tbl[mc_tbl_idx].mac_add,
+			       mac_rd_cmd->mac_add, 6);
+			memcpy(mc_mac_tbl[mc_tbl_idx].mg_vf_mb,
+			       desc[1].data, 24);
+			memcpy(&mc_mac_tbl[mc_tbl_idx].mg_vf_mb[24],
+			       desc[2].data, 8);
+			mc_tbl_idx++;
+
+			continue;
+		}
+
 		memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
 		snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
 			 "|%04d |%02x:%02x:%02x:%02x:%02x:%02x |",
@@ -941,17 +963,6 @@ static void hclge_dbg_dump_mac_table(struct hclge_dev *hdev)
 			 mac_rd_cmd->egress_port & HCLGE_DBG_MAC_TBL_E_PORT);
 
 		dev_info(&hdev->pdev->dev, "%s", printf_buf);
-
-		if (mac_rd_cmd->entry_type == HCLGE_DBG_MAC_MC_TBL) {
-			mc_mac_tbl[mc_tbl_idx].index = i;
-			memcpy(mc_mac_tbl[mc_tbl_idx].mac_add,
-			       mac_rd_cmd->mac_add, 6);
-			memcpy(mc_mac_tbl[mc_tbl_idx].mg_vf_mb,
-			       desc[1].data, 24);
-			memcpy(&mc_mac_tbl[mc_tbl_idx].mg_vf_mb[24],
-			       desc[2].data, 8);
-			mc_tbl_idx++;
-		}
 	}
 
 	if (mc_tbl_idx > 0) {
@@ -1028,7 +1039,7 @@ static void hclge_dbg_dump_port_vlan_table(struct hclge_dev *hdev)
 	u32 vlan_id;
 	int ret;
 
-	vlan_len = HCLGE_DBG_VLAN_ID_MAX / 8;
+	vlan_len = HCLGE_DBG_VLAN_ID_MAX / HCLGE_VLAN_BYTE_SIZE;
 	vlan_bitmap = kzalloc(vlan_len, GFP_KERNEL);
 	if (!vlan_bitmap) {
 		dev_err(&hdev->pdev->dev,
@@ -1038,15 +1049,15 @@ static void hclge_dbg_dump_port_vlan_table(struct hclge_dev *hdev)
 
 	for (vlan_id = 0; vlan_id < HCLGE_DBG_VLAN_ID_MAX; vlan_id++) {
 		/* Prevent long-term occupation of the command channel. */
-		if ((vlan_id % 100) == 0)
-			msleep(100);
+		if ((vlan_id % HCLGE_DBG_SCAN_STEP) == 0)
+			msleep(HCLGE_DBG_PAUSE_TIME);
 
 		hclge_cmd_setup_basic_desc(&desc,
 					   HCLGE_OPC_VLAN_FILTER_PF_CFG,
 					   true);
 
-		vlan_offset = vlan_id / 160;
-		vlan_byte = (vlan_id % 160) / 8;
-		vlan_byte_val = 1 << (vlan_id % 8);
+		vlan_offset = vlan_id / HCLGE_VLAN_ID_B;
+		vlan_byte = (vlan_id % HCLGE_VLAN_ID_B) / HCLGE_VLAN_BYTE_SIZE;
+		vlan_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
 
 		req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
 		req->vlan_offset = vlan_offset;
@@ -1086,7 +1097,7 @@ static void hclge_dbg_dump_vf_vlan_table(struct hclge_dev *hdev, char *cmd_buf)
 	u16 vf_id;
 	int ret;
 
-	ret = kstrtou16(&cmd_buf[17], 10, &vf_id);
+	ret = kstrtou16(cmd_buf, 0, &vf_id);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"vf id failed. vf id max: %d\n", hdev->num_alloc_vfs);
@@ -1103,8 +1114,8 @@ static void hclge_dbg_dump_vf_vlan_table(struct hclge_dev *hdev, char *cmd_buf)
 
 	for (vlan_id = 0; vlan_id < HCLGE_DBG_VLAN_ID_MAX; vlan_id++) {
 		/* Prevent long-term occupation of the command channel. */
-		if ((vlan_id % 100) == 0)
-			msleep(100);
+		if ((vlan_id % HCLGE_DBG_SCAN_STEP) == 0)
+			msleep(HCLGE_DBG_PAUSE_TIME);
 
 		hclge_cmd_setup_basic_desc(&desc[0],
 					   HCLGE_OPC_VLAN_FILTER_VF_CFG,
 					   true);
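The table dumpers above throttle their scans so a long walk does not monopolize the firmware command channel. A standalone userspace sketch of the same pattern; the constants mirror the HCLGE_DBG_SCAN_STEP/HCLGE_DBG_PAUSE_TIME values this patch introduces, and usleep() stands in for the kernel's msleep():

#include <unistd.h>

#define SCAN_STEP	100	/* entries between pauses */
#define PAUSE_MS	50	/* pause length in milliseconds */

static void scan_table(int nr_entries, void (*read_entry)(int idx))
{
	for (int i = 0; i < nr_entries; i++) {
		if ((i % SCAN_STEP) == 0)
			usleep(PAUSE_MS * 1000);	/* yield the channel */
		read_entry(i);
	}
}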
"x" : "y", loc); + /* tcam_data0 ~ tcam_data1 */ req = (u32 *)req1->tcam_data; for (i = 0; i < 2; i++) dev_info(&hdev->pdev->dev, "%08x\n", *req++); + /* tcam_data2 ~ tcam_data7 */ req = (u32 *)req2->tcam_data; for (i = 0; i < 6; i++) dev_info(&hdev->pdev->dev, "%08x\n", *req++); + /* tcam_data8 ~ tcam_data12 */ req = (u32 *)req3->tcam_data; for (i = 0; i < 5; i++) dev_info(&hdev->pdev->dev, "%08x\n", *req++); @@ -1272,7 +1286,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { hclge_dbg_dump_tc(hdev); } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) { - hclge_dbg_dump_tm_map(hdev, cmd_buf); + hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof("dump tm map")]); } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { hclge_dbg_dump_tm(hdev); } else if (strncmp(cmd_buf, "dump checksum", 13) == 0) { @@ -1288,11 +1302,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) } else if (strncmp(cmd_buf, "dump port vlan tbl", 18) == 0) { hclge_dbg_dump_port_vlan_table(hdev); } else if (strncmp(cmd_buf, "dump vf vlan tbl", 16) == 0) { - hclge_dbg_dump_vf_vlan_table(hdev, cmd_buf); + int len = sizeof("dump vf vlan tbl"); + + hclge_dbg_dump_vf_vlan_table(hdev, &cmd_buf[len]); } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { hclge_dbg_dump_mng_table(hdev); } else if (strncmp(cmd_buf, "dump reg", 8) == 0) { - hclge_dbg_dump_reg_cmd(hdev, cmd_buf); + hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof("dump reg")]); } else { dev_info(&hdev->pdev->dev, "unknown command\n"); return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index 21073a2930f95d6856d65f9faa1fbcc7b66e8a85..e863b478a85e97db7155c0fed0230bd528425431 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -43,6 +43,9 @@ #define HCLGE_DBG_DFX_SSU_2_OFFSET 12 +#define HCLGE_DBG_SCAN_STEP 100 +#define HCLGE_DBG_PAUSE_TIME 50 + #pragma pack(1) struct hclge_checksum_cmd { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 297a5fc67f499fa87ca70caddfed2b7f6b786615..5f28548efac57a531230ee5cf6b39a8218ebabb6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -80,7 +80,7 @@ const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" }, { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, @@ -475,19 +475,19 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, enum hclge_err_int_type int_type) { struct device *dev = &hdev->pdev->dev; - int num = 1; + int desc_num = 1; int ret; hclge_cmd_setup_basic_desc(&desc[0], cmd, true); if (flag) { desc[0].flag |= cpu_to_le16(flag); hclge_cmd_setup_basic_desc(&desc[1], cmd, true); - num = 2; + desc_num = 2; } if (w_num) desc[0].data[w_num] = cpu_to_le32(int_type); - ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); if (ret) 
dev_err(dev, "query error cmd failed (%d)\n", ret); @@ -718,7 +718,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc[2]; - int num = 1; + int desc_num = 1; int ret; /* configure PPU error interrupts */ @@ -737,7 +737,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK; desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK; desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK; - num = 2; + desc_num = 2; } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { hclge_cmd_setup_basic_desc(&desc[0], cmd, false); if (en) @@ -755,7 +755,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, return -EINVAL; } - ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num); return ret; } @@ -940,8 +940,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK; if (status) - hclge_log_error(dev, "IGU_INT_STS", - &hclge_igu_int[0], status); + hclge_log_error(dev, "IGU_INT_STS", &hclge_igu_int[0], status); /* log PPP(Programmable Packet Process) errors */ desc_data = (__le32 *)&desc[4]; @@ -1167,8 +1166,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) int ret; /* read overflow error status */ - ret = hclge_cmd_query_error(hdev, &desc[0], - HCLGE_ROCEE_PF_RAS_INT_CMD, + ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD, 0, 0, 0); if (ret) { dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); @@ -1207,10 +1205,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) return 0; } -static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) +static enum hnae3_reset_type +hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) { - enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET; - struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; struct device *dev = &hdev->pdev->dev; struct hclge_desc desc[2]; unsigned int status; @@ -1223,17 +1221,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); /* reset everything for now */ - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); - return ret; + return HNAE3_GLOBAL_RESET; } status = le32_to_cpu(desc[0].data[0]); - if (status & HCLGE_ROCEE_RERR_INT_MASK) + if (status & HCLGE_ROCEE_RERR_INT_MASK) { dev_warn(dev, "ROCEE RAS AXI rresp error\n"); + return HNAE3_FUNC_RESET; + } - if (status & HCLGE_ROCEE_BERR_INT_MASK) + if (status & HCLGE_ROCEE_BERR_INT_MASK) { dev_warn(dev, "ROCEE RAS AXI bresp error\n"); + return HNAE3_FUNC_RESET; + } if (status & HCLGE_ROCEE_ECC_INT_MASK) { dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); @@ -1245,9 +1246,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to process ovf error\n", ret); /* reset everything for now */ - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); - return ret; + return HNAE3_GLOBAL_RESET; } + reset_type = HNAE3_FUNC_RESET; } /* clear error status */ @@ -1256,12 +1257,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); /* reset everything for now */ - reset_type = HNAE3_GLOBAL_RESET; + return 
 	}
 
-	HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
-
-	return ret;
+	return reset_type;
 }
 
 static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
@@ -1293,13 +1292,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
 
 int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
 {
+	enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
 	struct hclge_dev *hdev = ae_dev->priv;
 
 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
 	    hdev->pdev->revision < 0x21)
-		return HNAE3_NONE_RESET;
+		return reset_type;
 
-	return hclge_log_and_clear_rocee_ras_error(hdev);
+	reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
+	if (reset_type != HNAE3_NONE_RESET)
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
+
+	return reset_type;
 }
 
 static const struct hclge_hw_blk hw_blk[] = {
@@ -1426,8 +1430,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
 	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
 	if (ret) {
-		dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
-			ret);
+		dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret);
 		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
 		goto msi_error;
 	}
@@ -1446,9 +1449,8 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
 	status = le32_to_cpu(*(desc_data + 2)) &
 			HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
 	if (status) {
-		dev_warn(dev,
-			 "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n",
-			 status);
+		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+				&hclge_ppu_mpf_abnormal_int_st2[0], status);
 		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
 	}
 
@@ -1458,8 +1460,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
 
 	ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
 	if (ret) {
-		dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
-			ret);
+		dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret);
 		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
 		goto msi_error;
 	}
@@ -1472,8 +1473,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
 
 	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
 	if (ret) {
-		dev_err(dev, "query all pf msix int cmd failed (%d)\n",
-			ret);
+		dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret);
 		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
 		goto msi_error;
 	}
@@ -1506,8 +1506,7 @@ void hclge_handle_hw_msix_error(struct hclge_dev *hdev)
 
 	ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
 	if (ret) {
-		dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
-			ret);
+		dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret);
 		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
 	}
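The hclge_err.c refactor above changes hclge_log_and_clear_rocee_ras_error() from queuing a reset request internally to returning the reset level it wants, leaving the request to the caller. A standalone sketch of that separation of concerns; the names, the status mask and the classification are illustrative, not the driver's actual error decoding:

enum reset_type { RESET_NONE, RESET_FUNC, RESET_GLOBAL };

/* classify only: no side effects, easy to unit test */
static enum reset_type classify_ras(unsigned int status)
{
	if (status & 0x1)	/* illustrative "unrecoverable" mask */
		return RESET_GLOBAL;
	if (status)		/* any other error: per-function reset */
		return RESET_FUNC;
	return RESET_NONE;
}

/* request_reset stands in for HCLGE_SET_DEFAULT_RESET_REQUEST() */
static void handle_ras(unsigned int status,
		       void (*request_reset)(enum reset_type))
{
	enum reset_type t = classify_ras(status);

	if (t != RESET_NONE)
		request_reset(t);	/* the caller decides to act */
}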
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 6d5035685a91972ab57181a13a9301512ecd2ec9..b2ab484b997785ba3632d1e3453cc5be27daf7de 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
 #include <net/rtnetlink.h>
 #include "kcompat.h"
 #include "hclge_cmd.h"
@@ -28,6 +29,8 @@
 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
 
 #define HCLGE_BUF_SIZE_UNIT	256
+#define HCLGE_BUF_MUL_BY	2
+#define HCLGE_BUF_DIV_BY	2
 #define HCLGE_RESET_MAX_FAIL_CNT	5
 
 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
@@ -383,8 +386,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
 		queue = handle->kinfo.tqp[i];
 		tqp = container_of(queue, struct hclge_tqp, q);
 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
-		hclge_cmd_setup_basic_desc(&desc[0],
-					   HCLGE_OPC_QUERY_RX_STATUS,
+		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
 					   true);
 
 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
@@ -392,7 +394,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
 		if (ret) {
 			dev_err(&hdev->pdev->dev,
 				"Query tqp stat fail, status = %d,queue = %d\n",
-				ret,	i);
+				ret, i);
 			return ret;
 		}
 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
@@ -446,6 +448,7 @@ static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 
+	/* each tqp has both a TX and an RX queue */
 	return kinfo->num_tqps * (2);
 }
 
@@ -590,8 +593,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
 	return count;
 }
 
-static void hclge_get_strings(struct hnae3_handle *handle,
-			      u32 stringset,
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
 			      u8 *data)
 {
 	u8 *p = (char *)data;
@@ -599,21 +601,17 @@ static void hclge_get_strings(struct hnae3_handle *handle,
 
 	if (stringset == ETH_SS_STATS) {
 		size = ARRAY_SIZE(g_mac_stats_string);
-		p = hclge_comm_get_strings(stringset,
-					   g_mac_stats_string,
-					   size,
-					   p);
+		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+					   size, p);
 		p = hclge_tqps_get_strings(handle, p);
 	} else if (stringset == ETH_SS_TEST) {
 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
-			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_LOOP_APP],
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
-			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
@@ -624,8 +622,7 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
 			p += ETH_GSTRING_LEN;
 		}
 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
-			memcpy(p,
-			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
+			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
@@ -638,10 +635,8 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
 	struct hclge_dev *hdev = vport->back;
 	u64 *p;
 
-	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
-				 g_mac_stats_string,
-				 ARRAY_SIZE(g_mac_stats_string),
-				 data);
+	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+				 ARRAY_SIZE(g_mac_stats_string), data);
 	p = hclge_tqps_get_stats(handle, p);
 }
 
@@ -662,6 +657,8 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
 
 static int hclge_query_function_status(struct hclge_dev *hdev)
 {
+#define HCLGE_QUERY_MAX_CNT	5
+
 	struct hclge_func_status_cmd *req;
 	struct hclge_desc desc;
 	int timeout = 0;
@@ -674,9 +671,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 		if (ret) {
 			dev_err(&hdev->pdev->dev,
-				"query function status failed %d.\n",
-				ret);
-
+				"query function status failed %d.\n", ret);
 			return ret;
 		}
 
@@ -684,7 +679,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
 		if (req->pf_state)
 			break;
 		usleep_range(1000, 2000);
-	} while (timeout++ < 5);
+	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
 
 	ret = hclge_parse_func_status(hdev, req);
 
@@ -736,7 +731,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 		/* PF should have NIC vectors and Roce vectors,
 		 * NIC vectors are queued before Roce vectors.
 		 */
-		hdev->num_msi = hdev->num_roce_msi  +
+		hdev->num_msi = hdev->num_roce_msi +
 				hdev->roce_base_msix_offset;
 	} else {
 		hdev->num_msi =
@@ -788,44 +783,44 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
 	unsigned long *supported = hdev->hw.mac.supported;
 
 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+				 supported);
 
 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+				 supported);
 
 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+				 supported);
 
 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+				 supported);
 
 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+				 supported);
 
 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+				 supported);
 
-	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
 #else
 	unsigned long *supported = hdev->hw.mac.supported;
 
 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 supported);
 
 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
-			supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+				 supported);
 
-	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
 #endif
 }
 
@@ -839,22 +834,24 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
 		speed_ability = HCLGE_SUPPORT_GE;
 
 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
-		set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 supported);
 
 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
-		set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
-		set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+				 supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				 supported);
 	}
 
 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
-		set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
-		set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
 	}
 
-	set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
-	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
-	set_bit(SUPPORTED_Asym_Pause, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
 }
 
 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
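The set_bit -> linkmode_set_bit conversion above matters because ethtool link modes live in a multi-word bitmap and the linkmode_* helpers carry the correct bitmap size (the removed set_bit(SUPPORTED_Asym_Pause, ...) line also mixed up a SUPPORTED_* flag with a link-mode bit index). A standalone sketch of the underlying word/mask arithmetic; the bit count and example index are illustrative:

#define BITS_PER_WORD	(8 * sizeof(unsigned long))
#define LINK_MODE_NBITS	92	/* the real count varies by kernel version */
#define BITMAP_WORDS	((LINK_MODE_NBITS + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long supported[BITMAP_WORDS];

static void linkmode_set(unsigned long *bm, unsigned int nr)
{
	bm[nr / BITS_PER_WORD] |= 1UL << (nr % BITS_PER_WORD);
}

static void example(void)
{
	linkmode_set(supported, 5);	/* illustrative bit index only */
}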
u8 speed_ability) @@ -980,6 +977,22 @@ static int hclge_get_cap(struct hclge_dev *hdev) return ret; } +static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) +{ +#define HCLGE_MIN_TX_DESC 64 +#define HCLGE_MIN_RX_DESC 64 + + if (is_kdump_kernel()) { + dev_info(&hdev->pdev->dev, + "Running kdump kernel. Using minimal resources\n"); + + /* the minimal number of queue pairs equals the number of vports */ + hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + hdev->num_tx_desc = HCLGE_MIN_TX_DESC; + hdev->num_rx_desc = HCLGE_MIN_RX_DESC; + } +} + static int hclge_configure(struct hclge_dev *hdev) { struct hclge_cfg cfg; @@ -1039,6 +1052,8 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; + hclge_init_kdump_kernel_config(hdev); + return ret; } @@ -1301,6 +1316,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; vport->rxvlan_cfg.rx_vlan_offload_en = true; INIT_LIST_HEAD(&vport->vlan_list); + INIT_LIST_HEAD(&vport->uc_mac_list); + INIT_LIST_HEAD(&vport->mc_mac_list); if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -1333,7 +1350,7 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, req = (struct hclge_tx_buff_alloc_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); - for (i = 0; i < HCLGE_TC_NUM; i++) { + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; req->tx_pkt_buff[i] = @@ -1370,17 +1387,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev) return cnt; } -static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) -{ - int i, cnt = 0; - - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) - if (hdev->hw_tc_map & BIT(i) && - hdev->tm_info.hw_pfc_map & BIT(i)) - cnt++; - return cnt; -} - /* Get the number of pfc enabled TCs, which have private buffer */ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) @@ -1445,24 +1451,21 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) { u32 shared_buf_min, shared_buf_tc, shared_std; - int tc_num, pfc_enable_num; + int tc_num = hclge_get_tc_num(hdev); u32 shared_buf, aligned_mps; u32 rx_priv; int i; - tc_num = hclge_get_tc_num(hdev); - pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); if (hnae3_dev_dcb_supported(hdev)) - shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size; + shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + + hdev->dv_buf_size; else shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF + hdev->dv_buf_size; - shared_buf_tc = pfc_enable_num * aligned_mps + - (tc_num - pfc_enable_num) * aligned_mps / 2 + - aligned_mps; + shared_buf_tc = tc_num * aligned_mps + aligned_mps; shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), HCLGE_BUF_SIZE_UNIT); @@ -1475,23 +1478,20 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, if (hnae3_dev_dcb_supported(hdev)) { buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high - - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); + - roundup(aligned_mps / HCLGE_BUF_DIV_BY, + HCLGE_BUF_SIZE_UNIT); } else { buf_alloc->s_buf.self.high = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; buf_alloc->s_buf.self.low = - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); + roundup(aligned_mps / HCLGE_BUF_DIV_BY, + HCLGE_BUF_SIZE_UNIT); } for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - if ((hdev->hw_tc_map & BIT(i)) &&
- (hdev->tm_info.hw_pfc_map & BIT(i))) { - buf_alloc->s_buf.tc_thrd[i].low = aligned_mps; - buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps; - } else { - buf_alloc->s_buf.tc_thrd[i].low = 0; - buf_alloc->s_buf.tc_thrd[i].high = aligned_mps; - } + buf_alloc->s_buf.tc_thrd[i].low = aligned_mps; + buf_alloc->s_buf.tc_thrd[i].high = HCLGE_BUF_MUL_BY * + aligned_mps; } return true; @@ -1544,12 +1544,13 @@ static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = max ? aligned_mps : 256; + priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; priv->wl.high = roundup(priv->wl.low + aligned_mps, HCLGE_BUF_SIZE_UNIT); } else { priv->wl.low = 0; - priv->wl.high = max ? (aligned_mps * 2) : aligned_mps; + priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : + aligned_mps; } priv->buf_size = priv->wl.high + hdev->dv_buf_size; @@ -1932,7 +1933,6 @@ static int hclge_init_msi(struct hclge_dev *hdev) static u8 hclge_check_speed_dup(u8 duplex, int speed) { - if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) duplex = HCLGE_MAC_FULL; @@ -2195,7 +2195,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev) static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) { - struct hclge_sfp_speed_cmd *resp = NULL; + struct hclge_sfp_speed_cmd *resp; struct hclge_desc desc; int ret = 0; @@ -2488,8 +2488,7 @@ int hclge_notify_client(struct hclge_dev *hdev, struct hnae3_client *client = hdev->nic_client; u16 i; - if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || - !client) + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) return 0; if (!client->ops->reset_notify) @@ -2634,7 +2633,7 @@ int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) return ret; } - if (!reset) + if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) continue; /* Inform VF to process the reset. 
@@ -3120,6 +3119,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev) for (i = 1; i < hdev->num_alloc_vport; i++) { struct hclge_vport *vport = &hdev->vport[i]; + /* vf keeps sending alive msg to pf every 2s; if pf doesn't + * receive a vf's alive msg for 8s, it regards the vf as offline + */ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); @@ -3236,28 +3238,30 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev, { struct hclge_rss_config_cmd *req; struct hclge_desc desc; - int key_offset; + int key_offset = 0; + int key_counts; int key_size; int ret; + key_counts = HCLGE_RSS_KEY_SIZE; req = (struct hclge_rss_config_cmd *)desc.data; - for (key_offset = 0; key_offset < 3; key_offset++) { + while (key_counts) { hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, false); req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); - if (key_offset == 2) - key_size = - HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; - else + if (key_counts >= HCLGE_RSS_HASH_KEY_NUM) key_size = HCLGE_RSS_HASH_KEY_NUM; - + else + key_size = key_counts; memcpy(req->hash_key, key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); + key_counts -= key_size; + key_offset++; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -3777,8 +3781,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, return 0; } -static int hclge_map_ring_to_vector(struct hnae3_handle *handle, - int vector, +static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector, struct hnae3_ring_chain_node *ring_chain) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -3795,8 +3798,7 @@ static int hclge_map_ring_to_vector(struct hnae3_handle *handle, return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); } -static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, - int vector, +static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, struct hnae3_ring_chain_node *ring_chain) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -3817,8 +3819,7 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail.
vectorid=%d, ret =%d\n", - vector_id, - ret); + vector_id, ret); return ret; } @@ -4122,19 +4123,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, switch (tuple_bit) { case BIT(INNER_DST_MAC): - for (i = 0; i < 6; i++) { - calc_x(key_x[5 - i], rule->tuples.dst_mac[i], + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], rule->tuples_mask.dst_mac[i]); - calc_y(key_y[5 - i], rule->tuples.dst_mac[i], + calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], rule->tuples_mask.dst_mac[i]); } return true; case BIT(INNER_SRC_MAC): - for (i = 0; i < 6; i++) { - calc_x(key_x[5 - i], rule->tuples.src_mac[i], + for (i = 0; i < ETH_ALEN; i++) { + calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], rule->tuples.src_mac[i]); - calc_y(key_y[5 - i], rule->tuples.src_mac[i], + calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], rule->tuples.src_mac[i]); } @@ -4170,19 +4171,19 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, return true; case BIT(INNER_SRC_IP): - calc_x(tmp_x_l, rule->tuples.src_ip[3], - rule->tuples_mask.src_ip[3]); - calc_y(tmp_y_l, rule->tuples.src_ip[3], - rule->tuples_mask.src_ip[3]); + calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX], + rule->tuples_mask.src_ip[IPV4_INDEX]); + calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX], + rule->tuples_mask.src_ip[IPV4_INDEX]); *(__le32 *)key_x = cpu_to_le32(tmp_x_l); *(__le32 *)key_y = cpu_to_le32(tmp_y_l); return true; case BIT(INNER_DST_IP): - calc_x(tmp_x_l, rule->tuples.dst_ip[3], - rule->tuples_mask.dst_ip[3]); - calc_y(tmp_y_l, rule->tuples.dst_ip[3], - rule->tuples_mask.dst_ip[3]); + calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX], + rule->tuples_mask.dst_ip[IPV4_INDEX]); + calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX], + rule->tuples_mask.dst_ip[IPV4_INDEX]); *(__le32 *)key_x = cpu_to_le32(tmp_x_l); *(__le32 *)key_y = cpu_to_le32(tmp_y_l); @@ -4421,6 +4422,7 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | BIT(INNER_IP_TOS); + /* check whether src/dst ip address is used */ if (!spec->ip6src[0] && !spec->ip6src[1] && !spec->ip6src[2] && !spec->ip6src[3]) *unused_tuple |= BIT(INNER_SRC_IP); @@ -4450,6 +4452,7 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + /* check whether src/dst ip address is used */ if (!spec->ip6src[0] && !spec->ip6src[1] && !spec->ip6src[2] && !spec->ip6src[3]) *unused_tuple |= BIT(INNER_SRC_IP); @@ -4645,14 +4648,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev, case SCTP_V4_FLOW: case TCP_V4_FLOW: case UDP_V4_FLOW: - rule->tuples.src_ip[3] = + rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); - rule->tuples_mask.src_ip[3] = + rule->tuples_mask.src_ip[IPV4_INDEX] = be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); - rule->tuples.dst_ip[3] = + rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); - rule->tuples_mask.dst_ip[3] = + rule->tuples_mask.dst_ip[IPV4_INDEX] = be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); @@ -4671,14 +4674,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev, break; case IP_USER_FLOW: - rule->tuples.src_ip[3] = + rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); - rule->tuples_mask.src_ip[3] = +
rule->tuples_mask.src_ip[IPV4_INDEX] = be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); - rule->tuples.dst_ip[3] = + rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); - rule->tuples_mask.dst_ip[3] = + rule->tuples_mask.dst_ip[IPV4_INDEX] = be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; @@ -4695,14 +4698,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev, case TCP_V6_FLOW: case UDP_V6_FLOW: be32_to_cpu_array(rule->tuples.src_ip, - fs->h_u.tcp_ip6_spec.ip6src, 4); + fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE); be32_to_cpu_array(rule->tuples_mask.src_ip, - fs->m_u.tcp_ip6_spec.ip6src, 4); + fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE); be32_to_cpu_array(rule->tuples.dst_ip, - fs->h_u.tcp_ip6_spec.ip6dst, 4); + fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); be32_to_cpu_array(rule->tuples_mask.dst_ip, - fs->m_u.tcp_ip6_spec.ip6dst, 4); + fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); rule->tuples_mask.src_port = @@ -4718,14 +4721,14 @@ static int hclge_fd_get_tuple(struct hclge_dev *hdev, break; case IPV6_USER_FLOW: be32_to_cpu_array(rule->tuples.src_ip, - fs->h_u.usr_ip6_spec.ip6src, 4); + fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE); be32_to_cpu_array(rule->tuples_mask.src_ip, - fs->m_u.usr_ip6_spec.ip6src, 4); + fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE); be32_to_cpu_array(rule->tuples.dst_ip, - fs->h_u.usr_ip6_spec.ip6dst, 4); + fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE); be32_to_cpu_array(rule->tuples_mask.dst_ip, - fs->m_u.usr_ip6_spec.ip6dst, 4); + fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE); rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; @@ -4898,18 +4901,16 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, if (!hclge_fd_rule_exist(hdev, fs->location)) { dev_err(&hdev->pdev->dev, - "Delete fail, rule %d is inexistent\n", - fs->location); + "Delete fail, rule %d is inexistent\n", fs->location); return -ENOENT; } - ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, - fs->location, NULL, false); + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, + NULL, false); if (ret) return ret; - return hclge_fd_update_rule_list(hdev, NULL, fs->location, - false); + return hclge_fd_update_rule_list(hdev, NULL, fs->location, false); } static void hclge_del_all_fd_entries(struct hnae3_handle *handle, @@ -4995,13 +4996,13 @@ static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, struct ethtool_tcpip4_spec *spec, struct ethtool_tcpip4_spec *spec_mask) { - spec->ip4src = cpu_to_be32(rule->tuples.src_ip[3]); + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? - 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[3]); spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? - 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); spec->psrc = cpu_to_be16(rule->tuples.src_port); spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? @@ -5020,13 +5021,13 @@ static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, struct ethtool_usrip4_spec *spec, struct ethtool_usrip4_spec *spec_mask) { - spec->ip4src = cpu_to_be32(rule->tuples.src_ip[3]); + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 
- 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); + 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); - spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[3]); + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? - 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); + 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); spec->tos = rule->tuples.ip_tos; spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? @@ -5044,18 +5045,20 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, struct ethtool_tcpip6_spec *spec_mask) { cpu_to_be32_array(spec->ip6src, - rule->tuples.src_ip, 4); + rule->tuples.src_ip, IPV6_SIZE); cpu_to_be32_array(spec->ip6dst, - rule->tuples.dst_ip, 4); + rule->tuples.dst_ip, IPV6_SIZE); if (rule->unused_tuple & BIT(INNER_SRC_IP)) - memset(spec_mask->ip6src, 0, sizeof(int) * 4); + memset(spec_mask->ip6src, 0, sizeof(int) * IPV6_SIZE); else - cpu_to_be32_array(spec->ip6src, rule->tuples_mask.src_ip, 4); + cpu_to_be32_array(spec->ip6src, rule->tuples_mask.src_ip, + IPV6_SIZE); if (rule->unused_tuple & BIT(INNER_DST_IP)) - memset(spec_mask->ip6dst, 0, sizeof(int) * 4); + memset(spec_mask->ip6dst, 0, sizeof(int) * IPV6_SIZE); else - cpu_to_be32_array(spec->ip6dst, rule->tuples_mask.dst_ip, 4); + cpu_to_be32_array(spec->ip6dst, rule->tuples_mask.dst_ip, + IPV6_SIZE); spec->psrc = cpu_to_be16(rule->tuples.src_port); spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? @@ -5070,21 +5073,19 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, struct ethtool_usrip6_spec *spec, struct ethtool_usrip6_spec *spec_mask) { - cpu_to_be32_array(spec->ip6src, - rule->tuples.src_ip, 4); - cpu_to_be32_array(spec->ip6dst, - rule->tuples.dst_ip, 4); + cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); if (rule->unused_tuple & BIT(INNER_SRC_IP)) - memset(spec_mask->ip6src, 0, sizeof(int) * 4); + memset(spec_mask->ip6src, 0, sizeof(int) * IPV6_SIZE); else cpu_to_be32_array(spec_mask->ip6src, - rule->tuples_mask.src_ip, 4); + rule->tuples_mask.src_ip, IPV6_SIZE); if (rule->unused_tuple & BIT(INNER_DST_IP)) - memset(spec_mask->ip6dst, 0, sizeof(int) * 4); + memset(spec_mask->ip6dst, 0, sizeof(int) * IPV6_SIZE); else cpu_to_be32_array(spec_mask->ip6dst, - rule->tuples_mask.dst_ip, 4); + rule->tuples_mask.dst_ip, IPV6_SIZE); spec->l4_proto = rule->tuples.ip_proto; spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 
@@ -5345,7 +5346,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, #define HCLGE_SERDES_RETRY_NUM 100 #define HCLGE_MAC_LINK_STATUS_MS 10 -#define HCLGE_MAC_LINK_STATUS_NUM 20 +#define HCLGE_MAC_LINK_STATUS_NUM 100 #define HCLGE_MAC_LINK_STATUS_DOWN 0 #define HCLGE_MAC_LINK_STATUS_UP 1 @@ -5603,11 +5604,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (op == HCLGE_MAC_VLAN_ADD) { if ((!resp_code) || (resp_code == 1)) { return 0; - } else if (resp_code == 2) { + } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) { dev_err(&hdev->pdev->dev, "add mac addr failed for uc_overflow.\n"); return -ENOSPC; - } else if (resp_code == 3) { + } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) { dev_err(&hdev->pdev->dev, "add mac addr failed for mc_overflow.\n"); return -ENOSPC; @@ -5652,13 +5653,14 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) { +#define HCLGE_VF_NUM_IN_FIRST_DESC 192 int word_num; int bit_num; if (vfid > 255 || vfid < 0) return -EIO; - if (vfid >= 0 && vfid <= 191) { + if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { word_num = vfid / 32; bit_num = vfid % 32; if (clr) @@ -5666,7 +5668,7 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) else desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); } else { - word_num = (vfid - 192) / 32; + word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; bit_num = vfid % 32; if (clr) desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); @@ -5850,6 +5852,10 @@ static int hclge_init_umv_space(struct hclge_dev *hdev) mutex_init(&hdev->umv_mutex); hdev->max_umv_size = allocated_size; + /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to + * preserve some unicast mac vlan table entries shared by pf + * and its vfs. + */ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_req_vfs + 2); @@ -5936,9 +5942,12 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) if (is_free) { if (vport->used_umv_num > hdev->priv_umv_size) hdev->share_umv_size++; - vport->used_umv_num--; + + if (vport->used_umv_num > 0) + vport->used_umv_num--; } else { - if (vport->used_umv_num >= hdev->priv_umv_size) + if (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size > 0) hdev->share_umv_size--; vport->used_umv_num++; } @@ -5968,8 +5977,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, is_multicast_ether_addr(addr)) { dev_err(&hdev->pdev->dev, "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", - addr, - is_zero_ether_addr(addr), + addr, is_zero_ether_addr(addr), is_broadcast_ether_addr(addr), is_multicast_ether_addr(addr)); return -EINVAL; @@ -6033,9 +6041,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, if (is_zero_ether_addr(addr) || is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) { - dev_dbg(&hdev->pdev->dev, - "Remove mac err! invalid mac:%pM.\n", - addr); + dev_dbg(&hdev->pdev->dev, "Remove mac err! 
invalid mac:%pM.\n", + addr); return -EINVAL; } @@ -6075,18 +6082,16 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, memset(&req, 0, sizeof(req)); hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); - if (!status) { - /* This mac addr exist, update VFID for it */ - hclge_update_desc_vfid(desc, vport->vport_id, false); - status = hclge_add_mac_vlan_tbl(vport, &req, desc); - } else { + if (status) { /* This mac addr do not exist, add new entry for it */ memset(desc[0].data, 0, sizeof(desc[0].data)); memset(desc[1].data, 0, sizeof(desc[0].data)); memset(desc[2].data, 0, sizeof(desc[0].data)); - hclge_update_desc_vfid(desc, vport->vport_id, false); - status = hclge_add_mac_vlan_tbl(vport, &req, desc); } + status = hclge_update_desc_vfid(desc, vport->vport_id, false); + if (status) + return status; + status = hclge_add_mac_vlan_tbl(vport, &req, desc); if (status == -ENOSPC) dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); @@ -6123,7 +6128,9 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { /* This mac addr exist, remove this handle's VFID for it */ - hclge_update_desc_vfid(desc, vport->vport_id, true); + status = hclge_update_desc_vfid(desc, vport->vport_id, true); + if (status) + return status; if (hclge_is_all_function_id_zero(desc)) /* All the vfid is zero, so need to delete this entry */ @@ -6149,7 +6156,6 @@ void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, enum HCLGE_MAC_ADDR_TYPE mac_type) { struct hclge_vport_mac_addr_cfg *mac_cfg; - struct hclge_dev *hdev = vport->back; struct list_head *list; if (!vport->vport_id) @@ -6159,16 +6165,13 @@ void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, if (!mac_cfg) return; - mac_cfg->vport_id = vport->vport_id; mac_cfg->hd_tbl_status = true; memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); list = (mac_type == HCLGE_MAC_ADDR_UC) ? - &hdev->uc_mac_list : &hdev->mc_mac_list; + &vport->uc_mac_list : &vport->mc_mac_list; - mutex_lock(&hdev->vport_cfg_mutex); list_add_tail(&mac_cfg->node, list); - mutex_unlock(&hdev->vport_cfg_mutex); } void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, @@ -6176,20 +6179,17 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, enum HCLGE_MAC_ADDR_TYPE mac_type) { struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; - struct hclge_dev *hdev = vport->back; struct list_head *list; bool uc_flag, mc_flag; list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
- &hdev->uc_mac_list : &hdev->mc_mac_list; + &vport->uc_mac_list : &vport->mc_mac_list; uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC; mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; - mutex_lock(&hdev->vport_cfg_mutex); list_for_each_entry_safe(mac_cfg, tmp, list, node) { - if (mac_cfg->vport_id == vport->vport_id && - ether_addr_equal(mac_cfg->mac_addr, mac_addr)) { + if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) { if (uc_flag && mac_cfg->hd_tbl_status) hclge_rm_uc_addr_common(vport, mac_addr); @@ -6201,59 +6201,51 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, break; } } - - mutex_unlock(&hdev->vport_cfg_mutex); } void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, enum HCLGE_MAC_ADDR_TYPE mac_type) { struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; - struct hclge_dev *hdev = vport->back; struct list_head *list; list = (mac_type == HCLGE_MAC_ADDR_UC) ? - &hdev->uc_mac_list : &hdev->mc_mac_list; + &vport->uc_mac_list : &vport->mc_mac_list; - mutex_lock(&hdev->vport_cfg_mutex); list_for_each_entry_safe(mac_cfg, tmp, list, node) { - if (mac_cfg->vport_id == vport->vport_id) { - if (mac_type == HCLGE_MAC_ADDR_UC && - mac_cfg->hd_tbl_status) - hclge_rm_uc_addr_common(vport, - mac_cfg->mac_addr); - - if (mac_type == HCLGE_MAC_ADDR_MC && - mac_cfg->hd_tbl_status) - hclge_rm_mc_addr_common(vport, - mac_cfg->mac_addr); - - mac_cfg->hd_tbl_status = false; - if (is_del_list) { - list_del(&mac_cfg->node); - kfree(mac_cfg); - } + if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status) + hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr); + + if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status) + hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr); + + mac_cfg->hd_tbl_status = false; + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); } } - - mutex_unlock(&hdev->vport_cfg_mutex); } static void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) { - struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct hclge_vport_mac_addr_cfg *mac, *tmp; + struct hclge_vport *vport; + int i; mutex_lock(&hdev->vport_cfg_mutex); - list_for_each_entry_safe(mac_cfg, tmp, &hdev->uc_mac_list, node) { - list_del(&mac_cfg->node); - kfree(mac_cfg); - } + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) { + list_del(&mac->node); + kfree(mac); + } - list_for_each_entry_safe(mac_cfg, tmp, &hdev->mc_mac_list, node) { - list_del(&mac_cfg->node); - kfree(mac_cfg); + list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) { + list_del(&mac->node); + kfree(mac); + } } - mutex_unlock(&hdev->vport_cfg_mutex); } @@ -6368,7 +6360,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, return -EINVAL; } - if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) + if ((!is_first || is_kdump_kernel()) && + hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) dev_warn(&hdev->pdev->dev, "remove old uc mac address fail.\n"); @@ -6412,7 +6405,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, } static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, - u8 fe_type, bool filter_en) + u8 fe_type, bool filter_en, u8 vf_id) { struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_desc desc; @@ -6423,6 +6416,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; req->vlan_fe = 
filter_en ? fe_type : 0; + req->vf_id = vf_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -6451,12 +6445,13 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) if (hdev->pdev->revision >= HNAE3_REVISION_ID_21) { hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, enable); + HCLGE_FILTER_FE_EGRESS, enable, 0); hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, enable); + HCLGE_FILTER_FE_INGRESS, enable, 0); } else { hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS_V1_B, enable); + HCLGE_FILTER_FE_EGRESS_V1_B, enable, + 0); } if (enable) handle->netdev_flags |= HNAE3_VLAN_FLTR; @@ -6551,9 +6546,9 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); - vlan_offset_160 = vlan_id / 160; - vlan_offset_byte = (vlan_id % 160) / 8; - vlan_offset_byte_val = 1 << (vlan_id % 8); + vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_B; + vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_B) / HCLGE_VLAN_BYTE_SIZE; + vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; req->vlan_offset = vlan_offset_160; @@ -6813,19 +6808,27 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) int i; if (hdev->pdev->revision >= HNAE3_REVISION_ID_21) { - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, true); - if (ret) - return ret; + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + ret = hclge_set_vlan_filter_ctrl(hdev, + HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, + true, + vport->vport_id); + if (ret) + return ret; + } ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true); + HCLGE_FILTER_FE_INGRESS, true, + 0); if (ret) return ret; } else { ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, HCLGE_FILTER_FE_EGRESS_V1_B, - true); + true, 0); if (ret) return ret; } @@ -6944,6 +6947,7 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) struct hclge_vport *vport; int i; + mutex_lock(&hdev->vport_cfg_mutex); for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { @@ -6951,6 +6955,7 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) kfree(vlan); } } + mutex_unlock(&hdev->vport_cfg_mutex); } int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) @@ -7074,7 +7079,11 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, u16 state; int ret; - if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) + if (hdev->pdev->revision == 0x20) + return -EOPNOTSUPP; + + /* qos is a 3-bit value, so it cannot be bigger than 7 */ + if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7) return -EINVAL; if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; @@ -7135,8 +7144,9 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) { struct hclge_dev *hdev = vport->back; - int i, max_frm_size, ret = 0; + int i, max_frm_size, ret; + /* HW supports 2 layers of vlan */ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; if (max_frm_size < HCLGE_MAC_MIN_FRAME || max_frm_size > HCLGE_MAC_MAX_FRAME) @@ -7246,7 +7256,7 @@ int hclge_reset_tqp(struct
hnae3_handle *handle, u16 queue_id) int reset_try_times = 0; int reset_status; u16 queue_gid; - int ret = 0; + int ret; queue_gid = hclge_covert_handle_qid_global(handle, queue_id); @@ -7263,7 +7273,6 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) return ret; } - reset_try_times = 0; while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { /* Wait for tqp hw reset */ msleep(20); @@ -7302,7 +7311,6 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) return; } - reset_try_times = 0; while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { /* Wait for tqp hw reset */ msleep(20); @@ -7819,12 +7827,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->reset_type = HNAE3_NONE_RESET; hdev->reset_level = HNAE3_FUNC_RESET; ae_dev->priv = hdev; + + /* HW supports 2 layers of vlan */ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; mutex_init(&hdev->vport_lock); mutex_init(&hdev->vport_cfg_mutex); - INIT_LIST_HEAD(&hdev->uc_mac_list); - INIT_LIST_HEAD(&hdev->mc_mac_list); ret = hclge_pci_init(hdev); if (ret) { @@ -7997,7 +8005,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); @@ -8018,7 +8026,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev) int i; for (i = 0; i < hdev->num_alloc_vport; i++) { - hclge_vport_start(vport); + hclge_vport_stop(vport); vport++; } } @@ -8088,8 +8096,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_init_fd_config(hdev); if (ret) { - dev_err(&pdev->dev, - "fd table init fail, ret=%d\n", ret); + dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); return ret; } @@ -8135,7 +8142,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) synchronize_irq(hdev->misc_vector.vector_irq); hclge_hw_error_set_state(hdev, false); - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); @@ -8272,6 +8279,7 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, void *data) { #define HCLGE_32_BIT_REG_RTN_DATANUM 8 +#define HCLGE_32_BIT_DESC_NODATA_LEN 2 struct hclge_desc *desc; u32 *reg_val = data; @@ -8283,7 +8291,8 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, if (regs_num == 0) return 0; - cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); + cmd_num = DIV_ROUND_UP(regs_num + HCLGE_32_BIT_DESC_NODATA_LEN, + HCLGE_32_BIT_REG_RTN_DATANUM); desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); if (!desc) return -ENOMEM; @@ -8300,7 +8309,8 @@ static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, for (i = 0; i < cmd_num; i++) { if (i == 0) { desc_data = (__le32 *)(&desc[i].data[0]); - n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; + n = HCLGE_32_BIT_REG_RTN_DATANUM - + HCLGE_32_BIT_DESC_NODATA_LEN; } else { desc_data = (__le32 *)(&desc[i]); n = HCLGE_32_BIT_REG_RTN_DATANUM; @@ -8322,6 +8332,7 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, void *data) { #define HCLGE_64_BIT_REG_RTN_DATANUM 4 +#define HCLGE_64_BIT_DESC_NODATA_LEN 1 struct hclge_desc *desc; u64 *reg_val = data; @@ -8333,7 +8344,8 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, if (regs_num == 0) return 0; - cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); + cmd_num =
DIV_ROUND_UP(regs_num + HCLGE_64_BIT_DESC_NODATA_LEN, + HCLGE_64_BIT_REG_RTN_DATANUM); desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); if (!desc) return -ENOMEM; @@ -8350,7 +8362,8 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, for (i = 0; i < cmd_num; i++) { if (i == 0) { desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; + n = HCLGE_64_BIT_REG_RTN_DATANUM - + HCLGE_64_BIT_DESC_NODATA_LEN; } else { desc_data = (__le64 *)(&desc[i]); n = HCLGE_64_BIT_REG_RTN_DATANUM; @@ -8410,8 +8423,7 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, } data = (u32 *)data + regs_num_32_bit; - ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, - data); + ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, data); if (ret) dev_err(&hdev->pdev->dev, "Get 64 bit register failed, ret = %d.\n", ret); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 208ba6bb632a14572bb6b14227935e4c16f0f210..65d336b9e43a850f725eaea03e139bfa0f198611 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -62,6 +62,9 @@ #define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) +#define HCLGE_VLAN_ID_B 160 +#define HCLGE_VLAN_BYTE_SIZE 8 + #define HCLGE_TQP_RESET_TRY_TIMES 10 #define HCLGE_PHY_PAGE_MDIX 0 @@ -190,7 +193,7 @@ struct hclge_mac { u8 autoneg; u8 duplex; u32 speed; - int link; /* store the link status of mac & phy (if phy exit)*/ + int link; /* store the link status of mac & phy (if phy exists) */ struct phy_device *phydev; struct mii_bus *mdio_bus; phy_interface_t phy_if; @@ -406,6 +409,7 @@ enum HCLGE_FD_KEY_TYPE { enum HCLGE_FD_STAGE { HCLGE_FD_STAGE_1, HCLGE_FD_STAGE_2, + MAX_STAGE_NUM, }; /* OUTER_XXX indicates tuples in tunnel header of tunnel packet @@ -460,7 +464,7 @@ enum HCLGE_FD_META_DATA { struct key_info { u8 key_type; - u8 key_length; + u8 key_length; /* in bits */ }; static const struct key_info meta_data_key_info[] = { @@ -534,18 +538,23 @@ struct hclge_fd_key_cfg { struct hclge_fd_cfg { u8 fd_mode; - u16 max_key_length; + u16 max_key_length; /* in bits */ u32 proto_support; - u32 rule_num[2]; /* rule entry number */ - u16 cnt_num[2]; /* rule hit counter number */ - struct hclge_fd_key_cfg key_cfg[2]; + u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */ + u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */ + struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM]; }; +#define IPV4_INDEX 3 +#define IPV6_SIZE 4 struct hclge_fd_rule_tuples { - u8 src_mac[6]; - u8 dst_mac[6]; - u32 src_ip[4]; - u32 dst_ip[4]; + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + /* Be compatible with both ipv4 and ipv6 addresses. + * For an ipv4 address, we store it in src/dst_ip[3].
+ */ + u32 src_ip[IPV6_SIZE]; + u32 dst_ip[IPV6_SIZE]; u16 src_port; u16 dst_port; u16 vlan_tag1; @@ -581,7 +590,6 @@ struct hclge_fd_ad_data { struct hclge_vport_mac_addr_cfg { struct list_head node; - int vport_id; int hd_tbl_status; u8 mac_addr[ETH_ALEN]; }; @@ -739,8 +747,6 @@ struct hclge_dev { struct mutex umv_mutex; /* protect share_umv_size */ struct mutex vport_cfg_mutex; /* Protect stored vf table */ - struct list_head uc_mac_list; /* Store VF unicast table */ - struct list_head mc_mac_list; /* Store VF multicast table */ }; /* VPort level vlan tag configuration for TX direction */ @@ -780,6 +786,17 @@ enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_MAX }; +#pragma pack(1) +struct hclge_vf_vlan_cfg { + u8 mbx_cmd; + u8 subcode; + u8 is_kill; + u16 vlan; + u16 proto; +}; + +#pragma pack() + struct hclge_vlan_info { u16 vlan_proto; /* sofar support 802.1Q only */ u16 qos; @@ -822,6 +839,9 @@ struct hclge_vport { unsigned long state; unsigned long last_active_jiffies; int mps; /* Max packet size */ + + struct list_head uc_mac_list; /* Store VF unicast table */ + struct list_head mc_mac_list; /* Store VF multicast table */ }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index e0fddfd8542acc0590be2121ecfe7e070cfa1f9e..f7d92e80fb3b926bf62a308387aef4a0deb9160e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -308,34 +308,34 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { + struct hclge_vf_vlan_cfg *msg_cmd; int status = 0; - if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { + msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg; + if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) { struct hnae3_handle *handle = &vport->nic; u16 vlan, proto; bool is_kill; - is_kill = !!mbx_req->msg[2]; - memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); - memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); + is_kill = !!msg_cmd->is_kill; + vlan = msg_cmd->vlan; + proto = msg_cmd->proto; status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), vlan, is_kill); - } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { + } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) { struct hnae3_handle *handle = &vport->nic; - bool en = mbx_req->msg[2] ? true : false; + bool en = msg_cmd->is_kill ? 
true : false; status = hclge_en_hw_strip_rxvtag(handle, en); - } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) { - struct hclge_vlan_info vlan_info; - u16 state; - - memcpy(&state, &mbx_req->msg[2], sizeof(u16)); - memcpy(&vlan_info.vlan_tag, &mbx_req->msg[4], sizeof(u16)); - memcpy(&vlan_info.qos, &mbx_req->msg[6], sizeof(u16)); - memcpy(&vlan_info.vlan_proto, &mbx_req->msg[8], sizeof(u16)); - status = hclge_update_port_base_vlan_cfg(vport, state, - &vlan_info); - } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) { + } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) { + struct hclge_vlan_info *vlan_info; + u16 *state; + + state = (u16 *)&mbx_req->msg[2]; + vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4]; + status = hclge_update_port_base_vlan_cfg(vport, *state, + vlan_info); + } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) { u8 state; state = vport->port_base_vlan_cfg.state; @@ -373,7 +373,7 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport, vf_tc_map |= BIT(i); ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map, - sizeof(u8)); + sizeof(vf_tc_map)); return ret; } @@ -410,24 +410,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport, HCLGE_TQPS_DEPTH_INFO_LEN); } +static int hclge_get_vf_media_type(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_data; + + resp_data = hdev->hw.mac.media_type; + return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data, + sizeof(resp_data)); +} + static int hclge_get_link_info(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[10]; - u16 media_type; + u8 msg_data[8]; u8 dest_vfid; u16 duplex; /* mac.link can only be 0 or 1 */ link_status = (u16)hdev->hw.mac.link; duplex = hdev->hw.mac.duplex; - media_type = hdev->hw.mac.media_type; memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); - memcpy(&msg_data[8], &media_type, sizeof(u16)); dest_vfid = mbx_req->mbx_src_vfid; /* send this requested info to VF */ @@ -669,11 +677,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_GET_VF_FLR_STATUS: + mutex_lock(&hdev->vport_cfg_mutex); hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC); hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC); hclge_rm_vport_all_vlan_table(vport, true); + mutex_unlock(&hdev->vport_cfg_mutex); break; case HCLGE_MBX_GET_RSS_KEY: ret = hclge_get_rss_key(vport, req); @@ -684,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) case HCLGE_MBX_GET_LINK_MODE: hclge_get_vf_link_mode(vport, req); break; + case HCLGE_MBX_GET_MEDIA_TYPE: + ret = hclge_get_vf_media_type(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get media type for VF\n", + ret); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 1392db070029f46e376438e9508f42d72af08bf3..a4e1784d7c770536fdc7fc4b92b746d12ac76c99 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -2,6 +2,7 @@ // Copyright (c) 2016-2017 Hisilicon Limited.
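The hclge_set_vf_vlan_cfg() rework above stops copying fields out of mbx_req->msg one by one and instead casts the raw message buffer to the new packed struct hclge_vf_vlan_cfg. Below is a minimal stand-alone sketch of that overlay, assuming only what the diff shows (msg[0] carries the opcode, msg[1] the subcode, payload behind them); the struct here mirrors the driver's layout rather than reusing its headers, and the opcode values are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors hclge_vf_vlan_cfg: pack(1) keeps 'vlan' at byte offset 3,
 * exactly where the VF wrote it into the flat msg[] array.
 */
#pragma pack(1)
struct vf_vlan_cfg {
	uint8_t  mbx_cmd;	/* msg[0]: opcode */
	uint8_t  subcode;	/* msg[1]: sub opcode */
	uint8_t  is_kill;	/* msg[2] */
	uint16_t vlan;		/* msg[3..4] */
	uint16_t proto;		/* msg[5..6] */
};
#pragma pack()

int main(void)
{
	uint8_t msg[16] = { 0 };		/* stands in for mbx_req->msg */
	uint16_t vlan = 100, proto = 0x8100;
	struct vf_vlan_cfg *cfg;

	msg[0] = 1;				/* made-up opcode */
	msg[1] = 0;				/* made-up subcode */
	msg[2] = 0;				/* is_kill = false */
	memcpy(&msg[3], &vlan, sizeof(vlan));
	memcpy(&msg[5], &proto, sizeof(proto));

	cfg = (struct vf_vlan_cfg *)msg;	/* same cast as the PF side */
	printf("vlan=%u proto=0x%x\n", cfg->vlan, cfg->proto);
	return 0;
}

Without pack(1) the compiler would pad 'vlan' up to offset 4 and the overlay would read the wrong bytes, which is why the header change wraps the struct in #pragma pack(1)/#pragma pack().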
#include +#include #include #include "kcompat.h" #include "hclge_cmd.h" @@ -27,17 +28,6 @@ enum hclge_mdio_c22_op_seq { #define HCLGE_MDIO_STA_B 0 -struct hclge_mdio_cfg_cmd { - u8 ctrl_bit; - u8 phyid; - u8 phyad; - u8 rsvd; - __le16 reserve; - __le16 data_wr; - __le16 data_rd; - __le16 sta; -}; - static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, u16 data) { @@ -119,6 +109,13 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) return le16_to_cpu(mdio_cmd->data_rd); } +static int hclge_phy_marvell_fixup(struct phy_device *phydev) +{ + phydev->dev_flags |= MARVELL_PHY_M1510_HNS3_LEDS; + + return 0; +} + int hclge_mac_mdio_config(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; @@ -162,6 +159,15 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev) mac->phydev = phydev; mac->mdio_bus = mdio_bus; + /* register the PHY board fixup (for Marvell 88E1510) */ + ret = phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510, + MARVELL_PHY_ID_MASK, + hclge_phy_marvell_fixup); + /* we can live without it, so just issue a warning */ + if (ret) + dev_warn(&hdev->pdev->dev, + "Cannot register PHY board fixup\n"); + return 0; } @@ -195,11 +201,29 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) struct hclge_dev *hdev = vport->back; struct net_device *netdev = hdev->vport[0].nic.netdev; struct phy_device *phydev = hdev->hw.mac.phydev; +#ifdef HAS_LINK_MODE_OPS + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; +#endif int ret; if (!phydev) return 0; +#ifdef HAS_LINK_MODE_OPS + linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); + + ret = phy_connect_direct(netdev, phydev, + hclge_mac_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) { + netdev_err(netdev, "phy_connect_direct err.\n"); + return ret; + } + + linkmode_copy(mask, hdev->hw.mac.supported); + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); +#else phydev->supported &= ~SUPPORTED_FIBRE; ret = phy_connect_direct(netdev, phydev, @@ -212,7 +236,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) phydev->supported &= *hdev->hw.mac.supported; phydev->advertising = phydev->supported; - +#endif return 0; } @@ -225,6 +249,9 @@ void hclge_mac_disconnect_phy(struct hnae3_handle *handle) if (!phydev) return; + phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510, + MARVELL_PHY_ID_MASK); + phy_disconnect(phydev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index ef095d9c566f4918713b0e029e2200f4b90fd0ca..787fa9c617a589d2a1eeb06f82943410b245cd5b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -4,6 +4,17 @@ #ifndef __HCLGE_MDIO_H #define __HCLGE_MDIO_H +struct hclge_mdio_cfg_cmd { + u8 ctrl_bit; + u8 phyid; + u8 phyad; + u8 rsvd; + __le16 reserve; + __le16 data_wr; + __le16 data_rd; + __le16 sta; +}; + int hclge_mac_mdio_config(struct hclge_dev *hdev); int hclge_mac_connect_phy(struct hnae3_handle *handle); void hclge_mac_disconnect_phy(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index c10846baeed142e6595dc1872a79b04bcde49ef4..c34ab0608e6873c724f726ce2846252b6c606c0d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -43,13 +43,17 @@ enum hclge_shaper_level { 
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, u8 *ir_b, u8 *ir_u, u8 *ir_s) { +#define DIVISOR_CLK (1000 * 8) +#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK) + const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { 6 * 256, /* Prioriy level */ 6 * 32, /* Prioriy group level */ 6 * 8, /* Port level */ 6 * 256 /* Qset level */ }; - u8 ir_u_calc = 0, ir_s_calc = 0; + u8 ir_u_calc = 0; + u8 ir_s_calc = 0; u32 ir_calc; u32 tick; @@ -66,7 +70,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, * ir_calc = ---------------- * 1000 * tick * 1 */ - ir_calc = (1008000 + (tick >> 1) - 1) / tick; + ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick; if (ir_calc == ir) { *ir_b = 126; @@ -78,27 +82,28 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, /* Increasing the denominator to select ir_s value */ while (ir_calc > ir) { ir_s_calc++; - ir_calc = 1008000 / (tick * (1 << ir_s_calc)); + ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc)); } if (ir_calc == ir) *ir_b = 126; else - *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000; + *ir_b = (ir * tick * (1 << ir_s_calc) + + (DIVISOR_CLK >> 1)) / DIVISOR_CLK; } else { /* Increasing the numerator to select ir_u value */ u32 numerator; while (ir_calc < ir) { ir_u_calc++; - numerator = 1008000 * (1 << ir_u_calc); + numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc); ir_calc = (numerator + (tick >> 1)) / tick; } if (ir_calc == ir) { *ir_b = 126; } else { - u32 denominator = (8000 * (1 << --ir_u_calc)); + u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc)); *ir_b = (ir * tick + (denominator >> 1)) / denominator; } } @@ -119,14 +124,13 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev, opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT)) return -EINVAL; - for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { + for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) { hclge_cmd_setup_basic_desc(&desc[i], opcode, true); - if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1)) - desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - else - desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); } + hclge_cmd_setup_basic_desc(&desc[i], opcode, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); if (ret) return ret; @@ -219,8 +223,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) trans_gap = pause_param->pause_trans_gap; trans_time = le16_to_cpu(pause_param->pause_trans_time); - return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, - trans_time); + return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time); } static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) @@ -361,29 +364,36 @@ static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, return hclge_cmd_send(&hdev->hw, &desc, 1); } +static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s, + u8 bs_b, u8 bs_s) +{ + u32 shapping_para = 0; + + hclge_tm_set_field(shapping_para, IR_B, ir_b); + hclge_tm_set_field(shapping_para, IR_U, ir_u); + hclge_tm_set_field(shapping_para, IR_S, ir_s); + hclge_tm_set_field(shapping_para, BS_B, bs_b); + hclge_tm_set_field(shapping_para, BS_S, bs_s); + + return shapping_para; +} + static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, enum hclge_shap_bucket bucket, u8 pg_id, - u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s) + u32 shapping_para) { struct hclge_pg_shapping_cmd *shap_cfg_cmd; enum hclge_opcode_type opcode; struct hclge_desc desc; - u32 shapping_para = 0; opcode = bucket ? 
HCLGE_OPC_TM_PG_P_SHAPPING : - HCLGE_OPC_TM_PG_C_SHAPPING; + HCLGE_OPC_TM_PG_C_SHAPPING; hclge_cmd_setup_basic_desc(&desc, opcode, false); shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; shap_cfg_cmd->pg_id = pg_id; - hclge_tm_set_field(shapping_para, IR_B, ir_b); - hclge_tm_set_field(shapping_para, IR_U, ir_u); - hclge_tm_set_field(shapping_para, IR_S, ir_s); - hclge_tm_set_field(shapping_para, BS_B, bs_b); - hclge_tm_set_field(shapping_para, BS_S, bs_s); - shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para); return hclge_cmd_send(&hdev->hw, &desc, 1); @@ -406,11 +416,9 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; - hclge_tm_set_field(shapping_para, IR_B, ir_b); - hclge_tm_set_field(shapping_para, IR_U, ir_u); - hclge_tm_set_field(shapping_para, IR_S, ir_s); - hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF); - hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF); + shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); @@ -419,16 +427,14 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, enum hclge_shap_bucket bucket, u8 pri_id, - u8 ir_b, u8 ir_u, u8 ir_s, - u8 bs_b, u8 bs_s) + u32 shapping_para) { struct hclge_pri_shapping_cmd *shap_cfg_cmd; enum hclge_opcode_type opcode; struct hclge_desc desc; - u32 shapping_para = 0; opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING : - HCLGE_OPC_TM_PRI_C_SHAPPING; + HCLGE_OPC_TM_PRI_C_SHAPPING; hclge_cmd_setup_basic_desc(&desc, opcode, false); @@ -436,12 +442,6 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pri_id = pri_id; - hclge_tm_set_field(shapping_para, IR_B, ir_b); - hclge_tm_set_field(shapping_para, IR_U, ir_u); - hclge_tm_set_field(shapping_para, IR_S, ir_s); - hclge_tm_set_field(shapping_para, BS_B, bs_b); - hclge_tm_set_field(shapping_para, BS_S, bs_s); - shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para); return hclge_cmd_send(&hdev->hw, &desc, 1); @@ -531,6 +531,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) max_rss_size = min_t(u16, hdev->rss_size_max, vport->alloc_tqps / kinfo->num_tc); + /* Set to user value, no larger than max_rss_size. */ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && kinfo->req_rss_size <= max_rss_size) { dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", @@ -538,6 +539,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->rss_size = kinfo->req_rss_size; } else if (kinfo->rss_size > max_rss_size || (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { + /* Set to the maximum specification value (max_rss_size). */ dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", kinfo->rss_size, max_rss_size); kinfo->rss_size = max_rss_size; @@ -604,12 +606,13 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) static void hclge_tm_pg_info_init(struct hclge_dev *hdev) { +#define BW_PERCENT 100 u8 i; for (i = 0; i < hdev->tm_info.num_pg; i++) { int k; - hdev->tm_info.pg_dwrr[i] = i ? 0 : 100; + hdev->tm_info.pg_dwrr[i] = i ? 
0 : BW_PERCENT; hdev->tm_info.pg_info[i].pg_id = i; hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; @@ -621,7 +624,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev) hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; for (k = 0; k < hdev->tm_info.num_tc; k++) - hdev->tm_info.pg_info[i].tc_dwrr[k] = 100; + hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT; } } @@ -682,6 +685,7 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) { u8 ir_u, ir_b, ir_s; + u32 shaper_para; int ret; u32 i; @@ -699,18 +703,21 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) if (ret) return ret; + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pg_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + shaper_para); if (ret) return ret; + shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pg_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - ir_b, ir_u, ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + shaper_para); if (ret) return ret; } @@ -730,8 +737,7 @@ static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) /* pg to prio */ for (i = 0; i < hdev->tm_info.num_pg; i++) { /* Cfg dwrr */ - ret = hclge_tm_pg_weight_cfg(hdev, i, - hdev->tm_info.pg_dwrr[i]); + ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]); if (ret) return ret; } @@ -811,6 +817,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) { u8 ir_u, ir_b, ir_s; + u32 shaper_para; int ret; u32 i; @@ -822,17 +829,19 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_tm_pri_shapping_cfg( - hdev, HCLGE_TM_SHAP_C_BUCKET, i, - 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, + shaper_para); if (ret) return ret; - ret = hclge_tm_pri_shapping_cfg( - hdev, HCLGE_TM_SHAP_P_BUCKET, i, - ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, + shaper_para); if (ret) return ret; } @@ -844,6 +853,7 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) { struct hclge_dev *hdev = vport->back; u8 ir_u, ir_b, ir_s; + u32 shaper_para; int ret; ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, @@ -851,18 +861,19 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) if (ret) return ret; + shaper_para = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, - vport->vport_id, - 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + vport->vport_id, shaper_para); if (ret) return ret; + shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, - vport->vport_id, - ir_b, ir_u, ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); + vport->vport_id, shaper_para); if (ret) return ret; @@ -1211,8 
+1222,8 @@ static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) struct hclge_mac *mac = &hdev->hw.mac; return hclge_pause_param_cfg(hdev, mac->mac_addr, - HCLGE_DEFAULT_PAUSE_TRANS_GAP, - HCLGE_DEFAULT_PAUSE_TRANS_TIME); + HCLGE_DEFAULT_PAUSE_TRANS_GAP, + HCLGE_DEFAULT_PAUSE_TRANS_TIME); } static int hclge_pfc_setup_hw(struct hclge_dev *hdev) @@ -1333,8 +1344,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) ret = hclge_pfc_setup_hw(hdev); if (init && ret == -EOPNOTSUPP) dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); - else + else if (ret) { + dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n", + ret); return ret; + } return hclge_tm_bp_setup(hdev); } @@ -1357,7 +1371,8 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) { - u8 i, bit_map = 0; + u8 bit_map = 0; + u8 i; hdev->tm_info.num_tc = num_tc; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index d5765c8cf3a3084dbae68fedfc1050125cd49083..5a7798096cc1696c7385d61c63aef20468af1352 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -81,7 +81,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) if (ring->flag == HCLGEVF_TYPE_CSQ) { reg_val = (u32)ring->desc_dma_addr; hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + reg_val = (u32)(ring->desc_dma_addr >> + HCLGEVF_RING_BASEADDR_SHIFT); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); @@ -93,7 +94,8 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) } else { reg_val = (u32)ring->desc_dma_addr; hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + reg_val = (u32)(ring->desc_dma_addr >> + HCLGEVF_RING_BASEADDR_SHIFT); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); @@ -363,8 +365,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) return 0; } +static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) +{ + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); +} + void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) { + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock_bh(&hdev->hw.cmq.crq.lock); + clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + hclgevf_cmd_uninit_regs(&hdev->hw); + spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index c1d29b1fc773794db3b9c37d5001eee29eac47c1..3b1058578e19b7483a796394062f026bcf16ad36 100644 --- 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index c1d29b1fc773794db3b9c37d5001eee29eac47c1..3b1058578e19b7483a796394062f026bcf16ad36 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -242,6 +242,8 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
 #define HCLGEVF_NIC_CMQ_DESC_NUM_S	3
 #define HCLGEVF_NIC_CMDQ_INT_SRC_REG	0x27100
 
+#define HCLGEVF_RING_BASEADDR_SHIFT	32
+
 static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
 	writel(value, base + reg);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 3ea83d1a68598768d31e0c2b47bcb0d761b604f9..fd256d256f2b214c0e22e4c193d44292cca13216 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -47,8 +47,7 @@ static const u8 hclgevf_hash_key[] = {
 
 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
 
-static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
-	struct hnae3_handle *handle)
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
 {
 	if (!handle->client)
 		return container_of(handle, struct hclgevf_dev, nic);
@@ -179,10 +178,8 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
 				u8 *data)
 {
-	u8 *p = (char *)data;
-
 	if (strset == ETH_SS_STATS)
-		p = hclgevf_tqps_get_strings(handle, p);
+		(void)hclgevf_tqps_get_strings(handle, data);
 }
 
 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
@@ -196,7 +193,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
 	int status;
 
 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
-				      true, &resp_msg, sizeof(u8));
+				      true, &resp_msg, sizeof(resp_msg));
 	if (status) {
 		dev_err(&hdev->pdev->dev,
 			"VF request to get TC info from PF failed %d",
@@ -285,13 +282,33 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
 
 	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
 
 	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
-				   2, true, resp_data, 2);
+				   sizeof(msg_data), true, resp_data,
+				   sizeof(resp_data));
 	if (!ret)
 		qid_in_pf = *(u16 *)resp_data;
 
 	return qid_in_pf;
 }
 
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+	u8 resp_msg;
+	int ret;
+
+	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
+				   true, &resp_msg, sizeof(resp_msg));
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get the pf port media type failed %d",
+			ret);
+		return ret;
+	}
+
+	hdev->hw.mac.media_type = resp_msg;
+
+	return 0;
+}
+
 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
 {
 	struct hclgevf_tqp *tqp;
@@ -362,7 +379,7 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
 	u8 resp_msg;
 
 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
-				      0, false, &resp_msg, sizeof(u8));
+				      0, false, &resp_msg, sizeof(resp_msg));
 	if (status)
 		dev_err(&hdev->pdev->dev,
 			"VF failed to fetch link status(%d) from PF", status);
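hclgevf_get_pf_media_type() above is a textbook instance of the driver's synchronous VF-to-PF mailbox query: send an opcode with no payload, block for the reply, then copy out the single response byte. Sketched as a reusable pattern (HCLGE_MBX_GET_SOMETHING is a hypothetical opcode invented for illustration; the hclgevf_send_mbx_msg() argument order matches its uses in this file: hdev, code, subcode, msg_data, msg_len, need_resp, resp_data, resp_len):

static int demo_get_pf_byte(struct hclgevf_dev *hdev, u8 *val)
{
	u8 resp_msg;
	int ret;

	/* need_resp=true makes the call block until the PF answers */
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_SOMETHING, 0, NULL, 0,
				   true, &resp_msg, sizeof(resp_msg));
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF request to PF failed %d", ret);
		return ret;
	}

	*val = resp_msg;
	return 0;
}

Note the same cleanup applied throughout this patch: response lengths are written as sizeof(resp_msg) rather than a literal sizeof(u8), so the length expression stays correct if the variable's type ever changes.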
@@ -397,11 +414,13 @@ void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
 	u8 resp_msg;
 
 	send_msg = HCLGEVF_ADVERTISING;
-	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
-			     sizeof(u8), false, &resp_msg, sizeof(u8));
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
+			     &send_msg, sizeof(send_msg), false,
+			     &resp_msg, sizeof(resp_msg));
 	send_msg = HCLGEVF_SUPPORTED;
-	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
-			     sizeof(u8), false, &resp_msg, sizeof(u8));
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
+			     &send_msg, sizeof(send_msg), false,
+			     &resp_msg, sizeof(resp_msg));
 }
 
 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
@@ -489,13 +508,14 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
 {
 	struct hclgevf_rss_config_cmd *req;
 	struct hclgevf_desc desc;
-	int key_offset;
+	int key_offset = 0;
+	int key_counts;
 	int key_size;
 	int ret;
 
+	key_counts = HCLGEVF_RSS_KEY_SIZE;
 	req = (struct hclgevf_rss_config_cmd *)desc.data;
-
-	for (key_offset = 0; key_offset < 3; key_offset++) {
+	while (key_counts) {
 		hclgevf_cmd_setup_basic_desc(&desc,
 					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
 					     false);
@@ -504,15 +524,15 @@ static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
 		req->hash_config |=
 			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
 
-		if (key_offset == 2)
-			key_size =
-			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
-		else
+		if (key_counts >= HCLGEVF_RSS_HASH_KEY_NUM)
 			key_size = HCLGEVF_RSS_HASH_KEY_NUM;
-
+		else
+			key_size = key_counts;
 		memcpy(req->hash_key,
 		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
 
+		key_counts -= key_size;
+		key_offset++;
 		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 		if (ret) {
 			dev_err(&hdev->pdev->dev,
@@ -1137,7 +1157,7 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
 			HCLGE_MBX_MAC_VLAN_UC_MODIFY;
 
 	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
-				      subcode, msg_data, ETH_ALEN * 2,
+				      subcode, msg_data, sizeof(msg_data),
 				      true, NULL, 0);
 	if (!status)
 		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1193,7 +1213,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
 
-	if (vlan_id > 4095)
+	if (vlan_id > MAX_VLAN_ID)
 		return -EINVAL;
 
 	if (proto != htons(ETH_P_8021Q))
@@ -1227,7 +1247,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
 		return 0;
 
-	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+	memcpy(msg_data, &queue_id, sizeof(queue_id));
 
 	/* disable vf queue before send queue reset msg to PF */
 	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
@@ -1235,7 +1255,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 		return ret;
 
 	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
-				    2, true, NULL, 0);
+				    sizeof(msg_data), true, NULL, 0);
 }
 
 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -1371,7 +1391,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
 	switch (hdev->reset_type) {
 	case HNAE3_VF_FUNC_RESET:
 		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
-					   0, true, NULL, sizeof(u8));
+					   0, true, NULL, 0);
 		break;
 	case HNAE3_FLR_RESET:
 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
@@ -1493,7 +1513,8 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
 	struct hclgevf_dev *hdev = ae_dev->priv;
 
-	if (time_before(jiffies, (hdev->last_reset_time + 5 * HZ)))
+	if (time_before(jiffies, (hdev->last_reset_time +
+				  HCLGEVF_RESET_TASK_INTERVAL * HZ)))
 		return;
 
 	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
@@ -1603,8 +1624,10 @@ static void hclgevf_service_timer(struct timer_list *t)
 {
 	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
 
-	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+	mod_timer(&hdev->service_timer, jiffies +
+		  HCLGEVF_GENERAL_TASK_INTERVAL * HZ);
 
+	hdev->stats_timer++;
 	hclgevf_task_schedule(hdev);
 }
 
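The hclgevf_set_rss_algo_key() rewrite a few hunks up replaces a hard-coded three-iteration loop with one driven by the remaining byte count. Assuming the usual sizes for this hardware (a 40-byte hash key and 16 key bytes per command descriptor; the actual HCLGEVF_RSS_KEY_SIZE / HCLGEVF_RSS_HASH_KEY_NUM values are not visible in this hunk), the chunking arithmetic looks like this in isolation:

#include <stdio.h>

#define DEMO_RSS_KEY_SIZE	40	/* assumed HCLGEVF_RSS_KEY_SIZE */
#define DEMO_KEY_BYTES_PER_DESC	16	/* assumed HCLGEVF_RSS_HASH_KEY_NUM */

int main(void)
{
	int key_counts = DEMO_RSS_KEY_SIZE;
	int key_offset = 0;

	while (key_counts) {
		int key_size = key_counts >= DEMO_KEY_BYTES_PER_DESC ?
			       DEMO_KEY_BYTES_PER_DESC : key_counts;

		/* one descriptor carries key bytes [off*16, off*16+size) */
		printf("desc %d: %d bytes from offset %d\n", key_offset,
		       key_size, key_offset * DEMO_KEY_BYTES_PER_DESC);

		key_counts -= key_size;
		key_offset++;
	}

	return 0;	/* prints chunks of 16, 16 and 8 bytes */
}

The loop terminates because key_size is always at least 1 while key_counts is nonzero, and unlike the old version it adapts automatically if the key or per-descriptor size ever changes.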
@@ -1623,8 +1646,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
 			       &hdev->reset_state)) {
 		/* PF has initmated that it is about to reset the hardware.
 		 * We now have to poll & check if hardware has actually
-		 * completed the reset sequence. On hardware reset
-		 * completion, VF needs to reset the client and ae device.
+		 * completed the reset sequence. On hardware reset completion,
+		 * VF needs to reset the client and ae device.
 		 */
 		hdev->reset_attempts = 0;
 
@@ -1640,7 +1663,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
 	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
 				      &hdev->reset_state)) {
 		/* we could be here when either of below happens:
-		 * 1. reset was initiated due to watchdog timeout due to
+		 * 1. reset was initiated due to watchdog timeout caused by
 		 *    a. IMP was earlier reset and our TX got choked down and
 		 *       which resulted in watchdog reacting and inducing VF
 		 *       reset. This also means our cmdq would be unreliable.
@@ -1702,7 +1725,8 @@ static void hclgevf_keep_alive_timer(struct timer_list *t)
 	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
 
 	schedule_work(&hdev->keep_alive_task);
-	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+	mod_timer(&hdev->keep_alive_timer, jiffies +
+		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
 }
 
 static void hclgevf_keep_alive_task(struct work_struct *work)
@@ -1713,11 +1737,11 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
 
 	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
 
-	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
 		return;
 
 	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
-				   0, false, &respmsg, sizeof(u8));
+				   0, false, &respmsg, sizeof(respmsg));
 	if (ret)
 		dev_err(&hdev->pdev->dev,
 			"VF sends keep alive cmd failed(=%d)\n", ret);
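The keep-alive change above swaps the guard from HCLGEVF_STATE_RST_HANDLING to HCLGEVF_STATE_CMD_DISABLE, so the task stops sending as soon as the command queue is unusable, not only while a reset is actively being handled; CMD_DISABLE is the same bit hclgevf_cmd_uninit() now manipulates under the CSQ/CRQ locks before wiping the queue registers, so a send cannot race the teardown. Reduced to a sketch (demo_send_keep_alive() is a hypothetical helper; the state bit and the mailbox call are the ones used above):

static int demo_send_keep_alive(struct hclgevf_dev *hdev)
{
	/* teardown/reset marks the command queue unusable first ... */
	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
		return 0;	/* ... so skip the send instead of timing out */

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				    0, false, NULL, 0);
}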
@@ -1725,9 +1749,16 @@ static void hclgevf_service_task(struct work_struct *work)
 {
+	struct hnae3_handle *handle;
 	struct hclgevf_dev *hdev;
 
 	hdev = container_of(work, struct hclgevf_dev, service_task);
+	handle = &hdev->nic;
+
+	if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
+		hclgevf_tqps_update_stats(handle);
+		hdev->stats_timer = 0;
+	}
 
 	/* request the link status from the PF. PF would be able to tell VF
 	 * about such updates in future so we might remove this later
@@ -1831,6 +1862,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 	if (ret)
 		return ret;
 
+	ret = hclgevf_get_pf_media_type(hdev);
+	if (ret)
+		return ret;
+
 	/* get tc configuration from PF */
 	return hclgevf_get_tc_info(hdev);
 }
@@ -1932,7 +1967,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 		return ret;
 	}
 
-	/* Initialize RSS indirect table for each vport */
+	/* Initialize RSS indirect table */
 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
 
@@ -1945,9 +1980,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 
 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 {
-	/* other vlan config(like, VLAN TX/RX offload) would also be added
-	 * here later
-	 */
 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
 				       false);
 }
@@ -1969,7 +2001,6 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
-	/* reset tqp stats */
 	hclgevf_reset_tqp_stats(handle);
 
 	hclgevf_request_link_info(hdev);
@@ -1991,7 +2022,6 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
 	for (i = 0; i < handle->kinfo.num_tqps; i++)
 		hclgevf_reset_tqp(handle, i);
 
-	/* reset tqp stats */
 	hclgevf_reset_tqp_stats(handle);
 	hclgevf_update_link_status(hdev, 0);
 }
@@ -2010,7 +2040,8 @@ static int hclgevf_client_start(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
-	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+	mod_timer(&hdev->keep_alive_timer, jiffies +
+		  HCLGEVF_KEEP_ALIVE_TASK_INTERVAL * HZ);
 
 	return hclgevf_set_alive(handle, true);
 }
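hclgevf_service_timer() (earlier) now bumps hdev->stats_timer once per HCLGEVF_GENERAL_TASK_INTERVAL-second tick, and hclgevf_service_task() above refreshes the TQP stats only once every HCLGEVF_STATS_TIMER_INTERVAL ticks, with all three periods named by the new macros in hclgevf_main.h below. The divider in isolation (the demo names and the simulated tick loop are mine):

#include <stdio.h>

#define DEMO_GENERAL_TASK_INTERVAL	5	/* timer period, seconds */
#define DEMO_STATS_TIMER_INTERVAL	36	/* ticks between refreshes */

int main(void)
{
	unsigned int stats_timer = 0;

	for (int tick = 1; tick <= 80; tick++) {	/* ~400s of uptime */
		stats_timer++;			/* done in the timer callback */

		if (stats_timer >= DEMO_STATS_TIMER_INTERVAL) {
			/* done in the service task */
			printf("tick %d (~%ds): update TQP stats\n",
			       tick, tick * DEMO_GENERAL_TASK_INTERVAL);
			stats_timer = 0;
		}
	}

	return 0;	/* a refresh roughly every 36 * 5 = 180 seconds */
}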
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index f39c4b044bd746a5087bbba45fef2873798fa787..22bbf19739e55d14fb37bf87aed11968ba517c02 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -12,10 +12,15 @@
 #define HCLGEVF_MOD_VERSION "1.0"
 #define HCLGEVF_DRIVER_NAME "hclgevf"
 
+#define MAX_VLAN_ID			4095
 #define HCLGEVF_MISC_VECTOR_NUM		0
 
 #define HCLGEVF_INVALID_VPORT		0xffff
 
+#define HCLGEVF_RESET_TASK_INTERVAL		5
+#define HCLGEVF_GENERAL_TASK_INTERVAL		5
+#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL	2
+
 /* This number in actual depends upon the total number of VFs
  * created by physical function. But the maximum number of
  * possible vector-per-VF is {VFn(1-32), VECTn(32 + 1)}.
@@ -62,6 +67,8 @@
 #define HCLGEVF_S_IP_BIT		BIT(3)
 #define HCLGEVF_V_TAG_BIT		BIT(4)
 
+#define HCLGEVF_STATS_TIMER_INTERVAL	(36)
+
 enum hclgevf_evt_cause {
 	HCLGEVF_VECTOR0_EVENT_RST,
 	HCLGEVF_VECTOR0_EVENT_MBX,
@@ -220,6 +227,7 @@ struct hclgevf_dev {
 	struct hnae3_client *nic_client;
 	struct hnae3_client *roce_client;
 	u32 flag;
+	u32 stats_timer;
 };
 
 static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 1f21cedb2f6b2a546e3d7c4bad08357f3fff8f0a..c561165484cf2e1080f4d0b6494c91f20a16b8c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -273,7 +273,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			link_status = le16_to_cpu(msg_q[1]);
 			memcpy(&speed, &msg_q[2], sizeof(speed));
 			duplex = (u8)le16_to_cpu(msg_q[4]);
-			hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
 
 			/* update upper layer with new link link status */
 			hclgevf_update_link_status(hdev, link_status);
diff --git a/drivers/net/ethernet/hisilicon/hns3/kcompat.h b/drivers/net/ethernet/hisilicon/hns3/kcompat.h
index dff319973287bdcb3a1362d7ac11b4a49e43d3ec..9dd90621769d94017613663e9aa4492ac441ad84 100644
--- a/drivers/net/ethernet/hisilicon/hns3/kcompat.h
+++ b/drivers/net/ethernet/hisilicon/hns3/kcompat.h
@@ -378,5 +378,19 @@ static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
 #else
 #endif
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 4))
+
+#include <linux/bitops.h>
+
+static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
+{
+	__set_bit(nr, addr);
+}
+
+#else
+
+#define HAS_LINK_MODE_OPS
+
+#endif
 #endif
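The kcompat fallback above lets common code call linkmode_set_bit() on kernels that predate <linux/linkmode.h>, while HAS_LINK_MODE_OPS flags that the kernel's own helpers exist. A sketch of a typical call site (the helper is illustrative, not from this driver; the mode bits come from <linux/ethtool.h>):

#include <linux/ethtool.h>

static void demo_fill_advertising(unsigned long *advertising)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising);
}

A caller would declare the bitmap with __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising), which is sized to hold every link-mode bit, and the same code then builds on either side of the version check.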