From 9228d6c15ba7fe558214295b7ee82955c9ea4e67 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Tue, 22 Jan 2019 19:14:46 +0800 Subject: [PATCH] driver: hns3: update hns3 driver from driver team driver inclusion category: feature ----------------------------------------- Based on dde21a4eedd17955f173f055e2d702dadb1e70ea ("performance optimizations") Signed-off-by: Yang Yingliang --- .../net/ethernet/hisilicon/hns3/hclge_mbx.h | 9 +- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 11 +- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 504 ++++++----- .../net/ethernet/hisilicon/hns3/hns3_enet.h | 6 +- .../ethernet/hisilicon/hns3/hns3_ethtool.c | 17 +- .../hisilicon/hns3/hns3pf/hclge_cmd.c | 65 +- .../hisilicon/hns3/hns3pf/hclge_debugfs.c | 2 +- .../hisilicon/hns3/hns3pf/hclge_debugfs.h | 2 +- .../hisilicon/hns3/hns3pf/hclge_err.c | 235 +++-- .../hisilicon/hns3/hns3pf/hclge_err.h | 44 + .../hisilicon/hns3/hns3pf/hclge_main.c | 807 +++++++++++++----- .../hisilicon/hns3/hns3pf/hclge_main.h | 57 +- .../hisilicon/hns3/hns3pf/hclge_mbx.c | 122 ++- .../hisilicon/hns3/hns3vf/hclgevf_main.c | 200 ++++- .../hisilicon/hns3/hns3vf/hclgevf_main.h | 5 + .../hisilicon/hns3/hns3vf/hclgevf_mbx.c | 26 +- 16 files changed, 1529 insertions(+), 583 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 811cf7e512b2..91dbe2a6e056 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -41,6 +41,11 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ + HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ + HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ + + HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ }; /* below are per-VF mac-vlan subcodes */ @@ -58,10 +63,12 @@ enum hclge_mbx_vlan_cfg_subcode { HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */ + HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port base vlan configuration */ + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,/* get port base vlan state */ }; #define HCLGE_MBX_MAX_MSG_SIZE 16 -#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 +#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index ca5eb99d5f82..e6895dc5b46b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -146,6 +146,13 @@ enum hnae3_flr_state { HNAE3_FLR_DONE, }; +enum hnae3_port_base_vlan_state { + HNAE3_PORT_BASE_VLAN_DISABLE, + HNAE3_PORT_BASE_VLAN_ENABLE, + HNAE3_PORT_BASE_VLAN_MODIFY, + HNAE3_PORT_BASE_VLAN_NOCHANGE, +}; + struct hnae3_vector_info { u8 __iomem *io_addr; int vector; @@ -459,7 +466,7 @@ struct hnae3_ae_ops { bool (*get_hw_reset_stat)(struct hnae3_handle *handle); bool (*ae_dev_resetting)(struct hnae3_handle *handle); unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle); - int (*set_gro_en)(struct hnae3_handle *handle, int enable); + int (*set_gro_en)(struct hnae3_handle *handle, bool enable); void (*enable_timer_task)(struct hnae3_handle *handle, bool enable); int 
(*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf); pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev); @@ -582,6 +589,8 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ + enum hnae3_port_base_vlan_state port_base_vlan_state; + u8 netdev_flags; struct dentry *hnae3_dbgfs; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index c55d01634196..75ffa47e5252 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -22,6 +22,8 @@ #include "hnae3.h" #include "hns3_enet.h" +#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) + static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); static void hns3_remove_hw_addr(struct net_device *netdev); @@ -376,6 +378,29 @@ static int hns3_nic_net_up(struct net_device *netdev) return ret; } +static void hns3_config_xps(struct hns3_nic_priv *priv) +{ + int i; + + for (i = 0; i < priv->vector_num; i++) { + struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; + struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; + + while (ring) { + int ret; + + ret = netif_set_xps_queue(priv->netdev, + &tqp_vector->affinity_mask, + ring->tqp->tqp_index); + if (ret) + netdev_warn(priv->netdev, + "set xps queue failed: %d", ret); + + ring = ring->next; + } + } +} + static int hns3_nic_net_open(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -411,6 +436,7 @@ static int hns3_nic_net_open(struct net_device *netdev) if (h->ae_algo->ops->enable_timer_task) h->ae_algo->ops->enable_timer_task(priv->ae_handle, true); + hns3_config_xps(priv); return 0; } @@ -597,7 +623,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, return 0; ret = skb_cow_head(skb, 0); - if (ret) + if (unlikely(ret)) return ret; l3.hdr = skb_network_header(skb); @@ -641,7 +667,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* normal or tunnel packet*/ l4_offset = l4.hdr - skb->data; - hdr_len = (l4.tcp->doff * 4) + l4_offset; + hdr_len = (l4.tcp->doff << 2) + l4_offset; /* remove payload length from inner pseudo checksum when tso*/ l4_paylen = skb->len - l4_offset; @@ -650,8 +676,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* find the txbd field values */ *paylen = skb->len - hdr_len; - hnae3_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -730,21 +755,19 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute L2 header size for normal packet, defined in 2 Bytes */ l2_len = l3.hdr - skb->data; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); /* tunnel packet*/ if (skb->encapsulation) { /* compute OL2 header size, defined in 2 Bytes */ ol2_len = l2_len; - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_S, ol2_len >> 1); /* compute OL3 header size, defined in 4 Bytes */ ol3_len = l4.hdr - l3.hdr; - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, ol3_len >> 2); + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, + ol3_len >> 2); /* MAC in UDP, MAC in GRE (0x6558)*/ if ((ol4_proto == 
IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { @@ -753,17 +776,16 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute OL4 header size, defined in 4 Bytes. */ ol4_len = l2_hdr - l4.hdr; - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - ol4_len >> 2); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L4LEN_S, ol4_len >> 2); /* switch IP header ptr from outer to inner header */ l3.hdr = skb_inner_network_header(skb); /* compute inner l2 header size, defined in 2 Bytes. */ l2_len = l3.hdr - l2_hdr; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, + l2_len >> 1); } else { /* skb packet types not supported by hardware, * txbd len fild doesn't be filled. @@ -779,24 +801,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute inner(/normal) L3 header size, defined in 4 Bytes */ l3_len = l4.hdr - l3.hdr; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + l4.tcp->doff); break; case IPPROTO_SCTP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, - (sizeof(struct sctphdr) >> 2)); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; case IPPROTO_UDP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, - (sizeof(struct udphdr) >> 2)); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; default: /* skb packet types not supported by hardware, @@ -827,6 +846,44 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) return true; } +static int hns3_check_l4_proto(struct sk_buff *skb, u32 l4_proto, + u32 *type_cs_vlan_tso) +{ + switch (l4_proto) { + case IPPROTO_TCP: + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + break; + case IPPROTO_UDP: + if (hns3_tunnel_csum_bug(skb)) + break; + + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + break; + case IPPROTO_SCTP: + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. 
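+	 * (that is, fall back to software checksumming via skb_checksum_help())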
+ */ + skb_checksum_help(skb); + } + + return 0; +} + static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, u8 il4_proto, u32 *type_cs_vlan_tso, u32 *ol_type_vlan_len_msec) @@ -841,34 +898,30 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* define outer network header type.*/ if (skb->protocol == htons(ETH_P_IP)) { if (skb_is_gso(skb)) - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); else - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); } /* define tunnel type(OL4).*/ switch (l4_proto) { case IPPROTO_UDP: - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); break; case IPPROTO_GRE: - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -889,69 +942,27 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, } if (l3.v4->version == 4) { - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV4); /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ if (skb_is_gso(skb)) - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); } else if (l3.v6->version == 6) { - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); - } - - switch (l4_proto) { - case IPPROTO_TCP: - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); - break; - case IPPROTO_UDP: - if (hns3_tunnel_csum_bug(skb)) - break; - - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); - break; - case IPPROTO_SCTP: - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); - break; - default: - /* drop the skb tunnel packet if hardware don't support, - * because hardware can't calculate csum when TSO. - */ - if (skb_is_gso(skb)) - return -EDOM; - - /* the stack computes the IP header already, - * driver calculate l4 checksum when not TSO. 
- */ - skb_checksum_help(skb); - return 0; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV6); } - return 0; + return hns3_check_l4_proto(skb, l4_proto, type_cs_vlan_tso); } static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) { /* Config bd buffer end */ - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_S, 0); - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); + hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); } static int hns3_fill_desc_vtags(struct sk_buff *skb, @@ -963,6 +974,8 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, { #define HNS3_TX_VLAN_PRIO_SHIFT 13 +struct hnae3_handle *handle = tx_ring->tqp->handle; + if (skb->protocol == htons(ETH_P_8021Q) && !(tx_ring->tqp->handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { @@ -984,8 +997,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. */ if (skb->protocol == htons(ETH_P_8021Q)) { - hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); - *out_vtag = vlan_tag; + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE){ + hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, + 1); + *out_vtag = vlan_tag; + } else { + hns3_set_field(*inner_vlan_flag, + HNS3_TXD_VLAN_B, 1); + *inner_vtag = vlan_tag; + } } else { hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; @@ -995,7 +1016,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, int rc; rc = skb_cow_head(skb, 0); - if (rc < 0) + if (unlikely(rc < 0)) return rc; vhdr = (struct vlan_ethhdr *)skb->data; vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) @@ -1012,26 +1033,21 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); - u32 ol_type_vlan_len_msec = 0; u16 bdtp_fe_sc_vld_ra_ri = 0; struct skb_frag_struct *frag; unsigned int frag_buf_num; - u32 type_cs_vlan_tso = 0; - struct sk_buff *skb; - u16 inner_vtag = 0; - u16 out_vtag = 0; - unsigned int k; - int sizeoflast; - u32 paylen = 0; + int k, sizeoflast; dma_addr_t dma; - u16 mss = 0; - u8 ol4_proto; - u8 il4_proto; - int ret; if (type == DESC_TYPE_SKB) { - skb = (struct sk_buff *)priv; - paylen = skb->len; + struct sk_buff *skb = (struct sk_buff *)priv; + u32 ol_type_vlan_len_msec = 0; + u32 type_cs_vlan_tso = 0; + u32 paylen = skb->len; + u16 inner_vtag = 0; + u16 out_vtag = 0; + u16 mss = 0; + int ret; ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, &ol_type_vlan_len_msec, @@ -1040,10 +1056,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return ret; if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ol4_proto, il4_proto; + skb_reset_mac_len(skb); ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (ret) + if (unlikely(ret)) return ret; hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, &type_cs_vlan_tso, @@ -1051,12 +1069,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, &type_cs_vlan_tso, &ol_type_vlan_len_msec); - if (ret) + if (unlikely(ret)) return ret; ret = hns3_set_tso(skb, &paylen, &mss, &type_cs_vlan_tso); - if (ret) + if (unlikely(ret)) 
return ret; } @@ -1076,15 +1094,15 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); } - if (dma_mapping_error(ring->dev, dma)) { + if (unlikely(dma_mapping_error(ring->dev, dma))) { ring->stats.sw_err_cnt++; return -ENOMEM; } desc_cb->length = size; - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - sizeoflast = size % HNS3_MAX_BD_SIZE; + frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; + sizeoflast = size & HNS3_TX_LAST_SIZE_M; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ @@ -1126,12 +1144,10 @@ static bool hns3_check_skb_need_linearize(struct sk_buff *skb) for (j = 0; j < 7; j++) total_length += - skb_frag_size(&skb_shinfo(skb)->frags[j]); + skb_frag_size(&skb_shinfo(skb)->frags[j + i]); if (total_length < skb_shinfo(skb)->gso_size) return true; - - total_length = 0; } return false; @@ -1150,15 +1166,16 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, int i; size = skb_headlen(skb); - buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; frag_num = skb_shinfo(skb)->nr_frags; for (i = 0; i < frag_num; i++) { frag = &skb_shinfo(skb)->frags[i]; size = skb_frag_size(frag); bdnum_for_frag = - (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) + (size + HNS3_MAX_BD_SIZE - 1) >> + HNS3_MAX_BD_SIZE_OFFSET; + if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) return -ENOMEM; buf_num += bdnum_for_frag; @@ -1168,7 +1185,8 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, if (!hns3_check_skb_need_linearize(skb)) goto out; - buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> + HNS3_MAX_BD_SIZE_OFFSET; if (ring_space(ring) < buf_num) return -EBUSY; /* manual split the send packet */ @@ -1198,7 +1216,8 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, buf_num = skb_shinfo(skb)->nr_frags + 1; if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> + HNS3_MAX_BD_SIZE_OFFSET; if (ring_space(ring) < buf_num) return -EBUSY; /* manual split the send packet */ @@ -1290,9 +1309,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; - ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); - if (ret) + ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); + if (unlikely(ret)) goto head_fill_err; next_to_use_frag = ring->next_to_use; @@ -1301,11 +1320,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); - ret = priv->ops.fill_desc(ring, frag, size, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); + ret = hns3_fill_desc(ring, frag, size, + seg_num - 1 == i ? 
1 : 0, + DESC_TYPE_PAGE); - if (ret) + if (unlikely(ret)) goto frag_fill_err; } @@ -1382,6 +1401,7 @@ static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t changed = netdev->features ^ features; struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; + bool enable; int ret; if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { @@ -1393,10 +1413,8 @@ static int hns3_nic_set_features(struct net_device *netdev, #ifdef NETIF_F_GRO_HW if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { - if (features & NETIF_F_GRO_HW) - ret = h->ae_algo->ops->set_gro_en(h, true); - else - ret = h->ae_algo->ops->set_gro_en(h, false); + enable = !!(features & NETIF_F_GRO_HW); + ret = h->ae_algo->ops->set_gro_en(h, enable); if (ret) return ret; } @@ -1404,28 +1422,21 @@ static int hns3_nic_set_features(struct net_device *netdev, if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && h->ae_algo->ops->enable_vlan_filter) { - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - h->ae_algo->ops->enable_vlan_filter(h, true); - else - h->ae_algo->ops->enable_vlan_filter(h, false); + enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + h->ae_algo->ops->enable_vlan_filter(h, enable); } if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && h->ae_algo->ops->enable_hw_strip_rxvtag) { - if (features & NETIF_F_HW_VLAN_CTAG_RX) - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); - else - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); - + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); if (ret) return ret; } if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { - if (features & NETIF_F_NTUPLE) - h->ae_algo->ops->enable_fd(h, true); - else - h->ae_algo->ops->enable_fd(h, false); + enable = !!(features & NETIF_F_NTUPLE); + h->ae_algo->ops->enable_fd(h, enable); } netdev->features = features; @@ -2387,13 +2398,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; /* check if hardware has done checksum */ - if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { + if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | + BIT(HNS3_RXD_OL3E_B) | BIT(HNS3_RXD_OL4E_B)))) { u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; u64_stats_update_end(&ring->syncp); @@ -2401,11 +2410,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); - ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); switch (ol4_type) { @@ -2413,6 +2417,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; case HNS3_OL4_TYPE_NO_TUN: + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ if ((l3_type == HNS3_L3_TYPE_IPV4 || l3_type == HNS3_L3_TYPE_IPV6) && @@ -2438,6 +2447,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, struct hns3_desc *desc, u32 l234info, u16 *vlan_tag) { + struct 
hnae3_handle *handle = ring->tqp->handle; struct pci_dev *pdev = ring->tqp->handle->pdev; if (pdev->revision == HNAE3_REVISION_ID_20) { @@ -2450,14 +2460,37 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 +#define HNS3_STRP_BOTH 0x3 + /* Hardware always insert vlan tag into rx descriptor when + * remove the tag from packet, driver needs to determin + * reporting which tag to stack. + */ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: - *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - return true; + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) { + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + return true; + } + + return false; case HNS3_STRP_INNER_VLAN: - *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) { + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + return true; + } + + return false; + case HNS3_STRP_BOTH: + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + else + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + return true; default: return false; @@ -2533,11 +2566,13 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); } - while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + /* make sure HW write desc complete */ + rmb(); + if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) return -ENXIO; if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { @@ -2627,11 +2662,37 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, HNS3_RXD_GRO_SIZE_S); } +static int hns3_check_l2l3l4_info(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info) +{ + enum hns3_pkt_l2t_type l2_frame_type; + struct sk_buff *skb = ring->skb; + + if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | + BIT(HNS3_RXD_L2E_B))))) { + u64_stats_update_begin(&ring->syncp); + if (l234info & BIT(HNS3_RXD_L2E_B)) + ring->stats.l2_err++; + else + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, + HNS3_RXD_DMAC_S); + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) + ring->stats.rx_multicast++; + + return 0; +} + static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, struct sk_buff **out_skb) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - enum hns3_pkt_l2t_type l2_frame_type; struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *desc; @@ -2649,7 +2710,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Check valid BD */ - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) return -ENXIO; if (!skb) @@ -2705,38 +2766,9 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, vlan_tag); } - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.non_vld_descs++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return 
-EINVAL; - } - - if (unlikely((!desc->rx.pkt_len) || - hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.err_pkt_len++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; - } - - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.l2_err++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; - } - - l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, - HNS3_RXD_DMAC_S); - if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) - ring->stats.rx_multicast++; + ret = hns3_check_l2l3l4_info(ring, desc, l234info); + if (ret) + return ret; u64_stats_update_begin(&ring->syncp); ring->stats.rx_pkts++; @@ -2825,6 +2857,46 @@ int hns3_clean_rx_ring( return recv_pkts; } +static enum hns3_flow_level_range hns3_get_new_flow_level( + struct hns3_enet_ring_group *ring_group, + enum hns3_flow_level_range new_flow_level, + int packets_per_msecs, int bytes_per_msecs) +{ + struct hns3_enet_tqp_vector *tqp_vector = + ring_group->ring->tqp_vector; + + +#define HNS3_RX_LOW_BYTE_RATE 10000 +#define HNS3_RX_MID_BYTE_RATE 20000 + + switch (new_flow_level) { + case HNS3_FLOW_LOW: + if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) + new_flow_level = HNS3_FLOW_MID; + break; + case HNS3_FLOW_MID: + if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) + new_flow_level = HNS3_FLOW_HIGH; + else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) + new_flow_level = HNS3_FLOW_LOW; + break; + case HNS3_FLOW_HIGH: + case HNS3_FLOW_ULTRA: + default: + if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) + new_flow_level = HNS3_FLOW_MID; + break; + } + +#define HNS3_RX_ULTRA_PACKET_RATE 40 + + if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && + &tqp_vector->rx_group == ring_group) + new_flow_level = HNS3_FLOW_ULTRA; + + return new_flow_level; +} + static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) { struct hns3_enet_tqp_vector *tqp_vector = @@ -2835,7 +2907,7 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) u32 time_passed_ms; u16 new_int_gl; - if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) + if (!tqp_vector->last_jiffies) return false; if (ring_group->total_packets == 0) { @@ -2864,34 +2936,9 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) do_div(ring_group->total_bytes, time_passed_ms); bytes_per_msecs = ring_group->total_bytes; -#define HNS3_RX_LOW_BYTE_RATE 10000 -#define HNS3_RX_MID_BYTE_RATE 20000 - - switch (new_flow_level) { - case HNS3_FLOW_LOW: - if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE) - new_flow_level = HNS3_FLOW_MID; - break; - case HNS3_FLOW_MID: - if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE) - new_flow_level = HNS3_FLOW_HIGH; - else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE) - new_flow_level = HNS3_FLOW_LOW; - break; - case HNS3_FLOW_HIGH: - case HNS3_FLOW_ULTRA: - default: - if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE) - new_flow_level = HNS3_FLOW_MID; - break; - } - -#define HNS3_RX_ULTRA_PACKET_RATE 40 - - if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE && - &tqp_vector->rx_group == ring_group) - new_flow_level = HNS3_FLOW_ULTRA; - + new_flow_level = hns3_get_new_flow_level(ring_group, new_flow_level, + packets_per_msecs, + bytes_per_msecs); switch (new_flow_level) { case HNS3_FLOW_LOW: new_int_gl = HNS3_INT_GL_50K; @@ -2938,7 +2985,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) } if (tx_group->coal.gl_adapt_enable) { - tx_update = 
hns3_get_new_int_gl(&tqp_vector->tx_group); + tx_update = hns3_get_new_int_gl(tx_group); if (tx_update) hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_group->coal.int_gl); @@ -3628,7 +3675,6 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - priv->ops.fill_desc = hns3_fill_desc; if ((netdev->features & NETIF_F_TSO) || (netdev->features & NETIF_F_TSO6)) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 5fb44d425e06..79ab2b6afd5b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -184,6 +184,8 @@ enum hns3_nic_state { #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) +#define HNS3_TX_LAST_SIZE_M 0xffff + #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) @@ -191,6 +193,7 @@ enum hns3_nic_state { #define HNS3_VECTOR_INITED 1 #define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_BD_SIZE_OFFSET 16 #define HNS3_MAX_BD_PER_FRAG 8 #define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS @@ -441,11 +444,8 @@ struct hns3_nic_ring_data { }; struct hns3_nic_ops { - int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, - int size, int frag_end, enum hns_desc_type type); int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring); - void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); }; enum hns3_flow_level_range { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index de6234b4accc..620cb491843a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -470,8 +470,15 @@ static void hns3_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct hnae3_handle *h = hns3_get_handle(netdev); + struct hns3_nic_priv *priv = h->priv; u64 *p = data; + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting!"); + return; + } + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { netdev_err(netdev, "could not get any statistics\n"); return; @@ -1077,7 +1084,7 @@ static void hns3_get_regs(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo->ops->get_regs) + if (!h->ae_algo->ops->get_regs || !data) return; h->ae_algo->ops->get_regs(h, &cmd->version, data); @@ -1096,11 +1103,13 @@ static int hns3_set_phys_id(struct net_device *netdev, struct ethtool_ops hns3vf_ethtool_ops = { .get_drvinfo = hns3_get_drvinfo, + .get_link = hns3_get_link, .get_ringparam = hns3_get_ringparam, .set_ringparam = hns3_set_ringparam, .get_strings = hns3_get_strings, .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, + .get_channels = hns3_get_channels, .get_rxnfc = hns3_get_rxnfc, .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, @@ -1108,10 +1117,8 @@ struct ethtool_ops hns3vf_ethtool_ops = { .get_rxfh = hns3_get_rss, .set_rxfh = hns3_set_rss, .get_link_ksettings = hns3_get_link_ksettings, - .get_channels = hns3_get_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, - .get_link = hns3_get_link, .get_regs_len = hns3_get_regs_len, .get_regs = hns3_get_regs, }; @@ -1127,6 +1134,8 @@ struct ethtool_ops hns3_ethtool_ops = { .get_strings = hns3_get_strings, .get_ethtool_stats = 
hns3_get_stats, .get_sset_count = hns3_get_sset_count, + .get_channels = hns3_get_channels, + .set_channels = hns3_set_channels, .get_rxnfc = hns3_get_rxnfc, .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, @@ -1136,8 +1145,6 @@ struct ethtool_ops hns3_ethtool_ops = { .get_link_ksettings = hns3_get_link_ksettings, .set_link_ksettings = hns3_set_link_ksettings, .nway_reset = hns3_nway_reset, - .get_channels = hns3_get_channels, - .set_channels = hns3_set_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, .get_regs_len = hns3_get_regs_len, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 5907a4114c4c..c8baf58a19fd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -184,6 +184,43 @@ static bool hclge_is_special_opcode(u16 opcode) return false; } +static int hclge_cmd_check_retval(struct hclge_hw *hw, + struct hclge_desc *desc, + int num, int *ntc) +{ + struct hclge_desc *desc_to_use; + u16 opcode, desc_ret; + int handle = 0; + int retval = 0; + + opcode = le16_to_cpu(desc[0].opcode); + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[*ntc]; + desc[handle] = *desc_to_use; + + if (likely(!hclge_is_special_opcode(opcode))) + desc_ret = le16_to_cpu(desc[handle].retval); + else + desc_ret = le16_to_cpu(desc[0].retval); + + if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) + retval = 0; + else if (desc_ret == HCLGE_CMD_NO_AUTH) + retval = -EPERM; + else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED) + retval = -EOPNOTSUPP; + else + retval = -EIO; + hw->cmq.last_status = desc_ret; + (*ntc)++; + handle++; + if (*ntc == hw->cmq.csq.desc_num) + *ntc = 0; + } + return retval; +} + + /** * hclge_cmd_send - send command to command queue * @hw: pointer to the hw struct @@ -201,7 +238,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) u32 timeout = 0; int handle = 0; int retval = 0; - u16 opcode, desc_ret; int ntc; spin_lock_bh(&hw->cmq.csq.lock); @@ -217,7 +253,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) * which will be use for hardware to write back */ ntc = hw->cmq.csq.next_to_use; - opcode = le16_to_cpu(desc[0].opcode); while (handle < num) { desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; *desc_to_use = desc[handle]; @@ -248,31 +283,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) if (!complete) { retval = -EAGAIN; } else { - handle = 0; - while (handle < num) { - /* Get the result of hardware write back */ - desc_to_use = &hw->cmq.csq.desc[ntc]; - desc[handle] = *desc_to_use; - - if (likely(!hclge_is_special_opcode(opcode))) - desc_ret = le16_to_cpu(desc[handle].retval); - else - desc_ret = le16_to_cpu(desc[0].retval); - - if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) - retval = 0; - else if (desc_ret == HCLGE_CMD_NO_AUTH) - retval = -EPERM; - else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED) - retval = -EOPNOTSUPP; - else - retval = -EIO; - hw->cmq.last_status = desc_ret; - ntc++; - handle++; - if (ntc == hw->cmq.csq.desc_num) - ntc = 0; - } + retval = hclge_cmd_check_retval(hw, desc, num, &ntc); } /* Clean the command send queue */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index e8ad9029009b..5df3743d80cf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -523,7 +523,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id); - dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n", + dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n", nq_to_qs_map->qset_id); cmd = HCLGE_OPC_TM_PG_WEIGHT; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index d674fdabd589..21073a2930f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -482,7 +482,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = { {true, "CGE_IGU_AFIFO_DFX_1"}, {true, "CGE_EGU_AFIFO_DFX_0"}, - {true, "CGE_IGU_AFIFO_DFX_1"}, + {true, "CGE_EGU_AFIFO_DFX_1"}, {false, "Reserved"}, {false, "Reserved"}, {false, "Reserved"}, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 480c30d8af92..297a5fc67f49 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -3,7 +3,7 @@ #include "hclge_err.h" -static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { +const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" }, { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" }, @@ -16,7 +16,7 @@ static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { +const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" }, { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" }, @@ -36,7 +36,7 @@ static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { +const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" }, { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" }, { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" }, @@ -46,19 +46,19 @@ static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { +const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" }, { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_igu_int[] = { +const struct hclge_hw_error hclge_igu_int[] = { { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { +const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { { .int_msk = BIT(0), .msg = "rx_buf_overflow" }, { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" }, { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" }, @@ -68,12 +68,12 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ncsi_err_int[] = { +const struct hclge_hw_error 
hclge_ncsi_err_int[] = { { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { +const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" }, { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" }, @@ -112,13 +112,13 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { +const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" }, { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { +const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" }, { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" }, @@ -128,7 +128,7 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_tm_sch_rint[] = { +const struct hclge_hw_error hclge_tm_sch_rint[] = { { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" }, { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" }, @@ -173,7 +173,7 @@ static const struct hclge_hw_error hclge_tm_sch_rint[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { +const struct hclge_hw_error hclge_qcn_fifo_rint[] = { { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" }, { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" }, { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" }, @@ -195,7 +195,7 @@ static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { +const struct hclge_hw_error hclge_qcn_ecc_rint[] = { { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" }, { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" }, @@ -210,7 +210,7 @@ static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { +const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" }, { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" }, @@ -228,7 +228,7 @@ static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { +const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" }, { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" }, { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" }, @@ -251,7 +251,7 @@ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { +const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" }, { 
.int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" }, @@ -259,7 +259,7 @@ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { +const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { { .int_msk = BIT(0), .msg = "over_8bd_no_fe" }, { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" }, { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" }, @@ -269,7 +269,7 @@ static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ssu_com_err_int[] = { +const struct hclge_hw_error hclge_ssu_com_err_int[] = { { .int_msk = BIT(0), .msg = "buf_sum_err" }, { .int_msk = BIT(1), .msg = "ppp_mb_num_err" }, { .int_msk = BIT(2), .msg = "ppp_mbid_err" }, @@ -283,7 +283,43 @@ static const struct hclge_hw_error hclge_ssu_com_err_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { +const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { + { .int_msk = BIT(0), .msg = "ssu_mem0_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "ssu_mem1_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "ssu_mem2_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "ssu_mem3_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "ssu_mem4_ecc_mbit_err" }, + { .int_msk = BIT(5), .msg = "ssu_mem5_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "ssu_mem6_ecc_mbit_err" }, + { .int_msk = BIT(7), .msg = "ssu_mem7_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "ssu_mem8_ecc_mbit_err" }, + { .int_msk = BIT(9), .msg = "ssu_mem9_ecc_mbit_err" }, + { .int_msk = BIT(10), .msg = "ssu_mem10_ecc_mbit_err" }, + { .int_msk = BIT(11), .msg = "ssu_mem11_ecc_mbit_err" }, + { .int_msk = BIT(12), .msg = "ssu_mem12_ecc_mbit_err" }, + { .int_msk = BIT(13), .msg = "ssu_mem13_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = "ssu_mem14_ecc_mbit_err" }, + { .int_msk = BIT(15), .msg = "ssu_mem15_ecc_mbit_err" }, + { .int_msk = BIT(16), .msg = "ssu_mem16_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "ssu_mem17_ecc_mbit_err" }, + { .int_msk = BIT(18), .msg = "ssu_mem18_ecc_mbit_err" }, + { .int_msk = BIT(19), .msg = "ssu_mem19_ecc_mbit_err" }, + { .int_msk = BIT(20), .msg = "ssu_mem20_ecc_mbit_err" }, + { .int_msk = BIT(21), .msg = "ssu_mem21_ecc_mbit_err" }, + { .int_msk = BIT(22), .msg = "ssu_mem22_ecc_mbit_err" }, + { .int_msk = BIT(23), .msg = "ssu_mem23_ecc_mbit_err" }, + { .int_msk = BIT(24), .msg = "ssu_mem24_ecc_mbit_err" }, + { .int_msk = BIT(25), .msg = "ssu_mem25_ecc_mbit_err" }, + { .int_msk = BIT(26), .msg = "ssu_mem26_ecc_mbit_err" }, + { .int_msk = BIT(27), .msg = "ssu_mem27_ecc_mbit_err" }, + { .int_msk = BIT(28), .msg = "ssu_mem28_ecc_mbit_err" }, + { .int_msk = BIT(29), .msg = "ssu_mem29_ecc_mbit_err" }, + { .int_msk = BIT(30), .msg = "ssu_mem30_ecc_mbit_err" }, + { .int_msk = BIT(31), .msg = "ssu_mem31_ecc_mbit_err" }, + { /* sentinel */ } +}; + +const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" }, { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" }, @@ -300,7 +336,7 @@ static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { +const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { { .int_msk = BIT(0), .msg = "ig_mac_inf_int" }, { .int_msk = BIT(1), .msg = "ig_host_inf_int" }, { .int_msk = 
BIT(2), .msg = "ig_roc_buf_int" }, @@ -328,7 +364,7 @@ static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { +const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" }, { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" }, { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" }, @@ -336,14 +372,14 @@ static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { +const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, { .int_msk = BIT(9), .msg = "low_water_line_err_port" }, { .int_msk = BIT(10), .msg = "hi_water_line_err_port" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { +const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" }, { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" }, { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" }, @@ -367,9 +403,9 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { { /* sentinel */ } }; -static void hclge_log_error(struct device *dev, char *reg, - const struct hclge_hw_error *err, - u32 err_sts) +void hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, + u32 err_sts) { while (err->msg) { if (err->int_msk & err_sts) @@ -379,6 +415,49 @@ static void hclge_log_error(struct device *dev, char *reg, } } +int hclge_query_error(struct hclge_dev *hdev, struct hclge_desc *desc, + enum hclge_opcode_type opcode, int num) +{ + hclge_cmd_setup_basic_desc(&desc[0], opcode, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + return hclge_cmd_send(&hdev->hw, &desc[0], num); +} + +int hclge_clear_error(struct hclge_dev *hdev, struct hclge_desc *desc, int num) +{ + hclge_cmd_reuse_desc(&desc[0], false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + return hclge_cmd_send(&hdev->hw, &desc[0], num); +} + +struct hclge_desc *hclge_query_bd_num(struct hclge_dev *hdev, + struct hclge_bd_num *bd_num, + enum hclge_opcode_type opcode) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc_bd; + struct hclge_desc *desc; + int ret; + + /* query the number of registers in the RAS int status */ + hclge_cmd_setup_basic_desc(&desc_bd, opcode, true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); + if (ret) { + dev_err(dev, "fail(%d) to query int status bd num\n", ret); + return NULL; + } + + bd_num->mpf_bd_num = le32_to_cpu(desc_bd.data[0]); + bd_num->pf_bd_num = le32_to_cpu(desc_bd.data[1]); + bd_num->max_bd_num = max_t(u32, le32_to_cpu(desc_bd.data[0]), + le32_to_cpu(desc_bd.data[1])); + + desc = kcalloc(bd_num->max_bd_num, sizeof(struct hclge_desc), + GFP_KERNEL); + + return desc; +} + /* hclge_cmd_query_error: read the error information * @hdev: pointer to struct hclge_dev * @desc: descriptor for describing the command @@ -793,11 +872,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, int ret; /* query all main PF RAS errors */ - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT, - true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - - ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + ret = hclge_query_error(hdev, &desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT, + num); if (ret) { dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret); return ret; 
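/* Illustrative sketch, not part of this patch: the helpers added above
 * (hclge_query_bd_num(), hclge_query_error() and hclge_clear_error())
 * factor the duplicated command sequences out of the RAS handlers, so
 * each handler reduces to query -> parse -> clear. The signatures are
 * the ones declared in this patch; error handling is shortened here.
 */
static int hclge_ras_flow_sketch(struct hclge_dev *hdev)
{
	struct hclge_bd_num bd_num;
	struct hclge_desc *desc;
	int ret;

	/* one command reports how many descriptors each status block needs;
	 * the helper allocates an array sized for the larger of the two
	 */
	desc = hclge_query_bd_num(hdev, &bd_num,
				  HCLGE_QUERY_RAS_INT_STS_BD_NUM);
	if (!desc)
		return -ENOMEM;

	/* read the main-PF RAS status registers ... */
	ret = hclge_query_error(hdev, desc, HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				bd_num.mpf_bd_num);
	if (!ret)
		/* ... then reuse the same descriptors to clear them */
		ret = hclge_clear_error(hdev, desc, bd_num.mpf_bd_num);

	kfree(desc);
	return ret;
}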
@@ -841,13 +917,15 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, desc_data = (__le32 *)&desc[2]; status = le32_to_cpu(*(desc_data + 2)); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n"); + hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + &hclge_ssu_mem_ecc_err_int[0], status); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } status = le32_to_cpu(*(desc_data + 3)) & BIT(0); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n"); + dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } @@ -935,10 +1013,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, } /* clear all main PF RAS errors */ - hclge_cmd_reuse_desc(&desc[0], false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - - ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + ret = hclge_clear_error(hdev, &desc[0], num); if (ret) dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret); @@ -964,11 +1039,8 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, int ret; /* query all PF RAS errors */ - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT, - true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - - ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + ret = hclge_query_error(hdev, &desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT, + num); if (ret) { dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret); return ret; @@ -1003,11 +1075,15 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", &hclge_igu_egu_tnl_int[0], status); - /* clear all PF RAS errors */ - hclge_cmd_reuse_desc(&desc[0], false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], status); - ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + /* clear all PF RAS errors */ + ret = hclge_clear_error(hdev, &desc[0], num); if (ret) dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret); @@ -1016,40 +1092,71 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, static int hclge_handle_all_ras_errors(struct hclge_dev *hdev) { - struct device *dev = &hdev->pdev->dev; - u32 mpf_bd_num, pf_bd_num, bd_num; - struct hclge_desc desc_bd; + struct hclge_bd_num bd_num; struct hclge_desc *desc; int ret; /* query the number of registers in the RAS int status */ - hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM, - true); - ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); + desc = hclge_query_bd_num(hdev, &bd_num, + HCLGE_QUERY_RAS_INT_STS_BD_NUM); + if (!desc) + return -ENOMEM; + + /* handle all main PF RAS errors */ + ret = hclge_handle_mpf_ras_error(hdev, desc, bd_num.mpf_bd_num); if (ret) { - dev_err(dev, "fail(%d) to query ras int status bd num\n", ret); + kfree(desc); return ret; } - mpf_bd_num = le32_to_cpu(desc_bd.data[0]); - pf_bd_num = le32_to_cpu(desc_bd.data[1]); - bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + memset(desc, 0, bd_num.max_bd_num * sizeof(struct hclge_desc)); - desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + /* handle all PF RAS errors */ + ret = hclge_handle_pf_ras_error(hdev, desc, bd_num.pf_bd_num); + kfree(desc); + + return ret; +} + +int hclge_clear_all_ras_errors(struct hclge_dev *hdev) +{ + struct 
hclge_bd_num bd_num; + struct hclge_desc *desc; + int ret; + + /* query the number of registers in the RAS int status */ + desc = hclge_query_bd_num(hdev, &bd_num, + HCLGE_QUERY_RAS_INT_STS_BD_NUM); if (!desc) return -ENOMEM; - /* handle all main PF RAS errors */ - ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num); + /* query all main PF RAS errors */ + ret = hclge_query_error(hdev, desc, HCLGE_QUERY_CLEAR_MPF_RAS_INT, + bd_num.mpf_bd_num); if (ret) { kfree(desc); return ret; } - memset(desc, 0, bd_num * sizeof(struct hclge_desc)); - /* handle all PF RAS errors */ - ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num); - kfree(desc); + /* clear all main PF RAS errors */ + ret = hclge_clear_error(hdev, desc, bd_num.mpf_bd_num); + if (ret) { + kfree(desc); + return ret; + } + memset(desc, 0, bd_num.max_bd_num * sizeof(struct hclge_desc)); + /* query all PF RAS errors */ + ret = hclge_query_error(hdev, desc, HCLGE_QUERY_CLEAR_PF_RAS_INT, + bd_num.pf_bd_num); + if (ret) { + kfree(desc); + return ret; + } + + /* clear all PF RAS errors */ + ret = hclge_clear_error(hdev, desc, bd_num.pf_bd_num); + + kfree(desc); return ret; } @@ -1184,7 +1291,7 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) +int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 45c6d93d0459..044849be20b0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -79,6 +79,7 @@ #define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) #define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) #define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28) +#define HCLGE_PPU_PF_INT_RAS_MASK 0x18 #define HCLGE_PPU_PF_INT_MSIX_MASK 0x27 #define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) #define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) @@ -113,7 +114,50 @@ struct hclge_hw_error { const char *msg; }; +struct hclge_bd_num { + u32 mpf_bd_num; + u32 pf_bd_num; + u32 max_bd_num; +}; + +extern const struct hclge_hw_error hclge_imp_tcm_ecc_int[]; +extern const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[]; +extern const struct hclge_hw_error hclge_tqp_int_ecc_int[]; +extern const struct hclge_hw_error hclge_msix_sram_ecc_int[]; +extern const struct hclge_hw_error hclge_igu_int[]; +extern const struct hclge_hw_error hclge_igu_egu_tnl_int[]; +extern const struct hclge_hw_error hclge_ncsi_err_int[]; +extern const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[]; +extern const struct hclge_hw_error hclge_ppp_pf_abnormal_int[]; +extern const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[]; +extern const struct hclge_hw_error hclge_tm_sch_rint[]; +extern const struct hclge_hw_error hclge_qcn_fifo_rint[]; +extern const struct hclge_hw_error hclge_qcn_ecc_rint[]; +extern const struct hclge_hw_error hclge_mac_afifo_tnl_int[]; +extern const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[]; +extern const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[]; +extern const struct hclge_hw_error hclge_ppu_pf_abnormal_int[]; +extern const struct hclge_hw_error hclge_ssu_com_err_int[]; +extern const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[]; +extern const struct hclge_hw_error hclge_ssu_port_based_err_int[]; +extern const struct hclge_hw_error hclge_ssu_fifo_overflow_int[]; +extern 
const struct hclge_hw_error hclge_ssu_ets_tcg_int[]; +extern const struct hclge_hw_error hclge_ssu_port_based_pf_int[]; +extern const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[]; + int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state); +int hclge_clear_all_ras_errors(struct hclge_dev *hdev); pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev); void hclge_handle_hw_msix_error(struct hclge_dev *hdev); +int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev); +void hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, + u32 err_sts); +int hclge_query_error(struct hclge_dev *hdev, struct hclge_desc *desc, + enum hclge_opcode_type opcode, int num); +int hclge_clear_error(struct hclge_dev *hdev, struct hclge_desc *desc, int num); +struct hclge_desc *hclge_query_bd_num(struct hclge_dev *hdev, + struct hclge_bd_num *bd_num, + enum hclge_opcode_type opcode); + #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 29ad17f4ac28..6d5035685a91 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -35,6 +35,10 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, u16 *allocated_size, bool is_alloc); +static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool writen_to_tbl); +static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl); struct hnae3_ae_algo ae_algo; @@ -1294,6 +1298,9 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->back = hdev; vport->vport_id = i; vport->mps = HCLGE_MAC_DEFAULT_FRAME; + vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->rxvlan_cfg.rx_vlan_offload_en = true; + INIT_LIST_HEAD(&vport->vlan_list); if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -1501,13 +1508,14 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; - if (total_size < hdev->tx_buf_size) - return -ENOMEM; + if (hdev->hw_tc_map & BIT(i)) { + if (total_size < hdev->tx_buf_size) + return -ENOMEM; - if (hdev->hw_tc_map & BIT(i)) priv->tx_buf_size = hdev->tx_buf_size; - else + } else { priv->tx_buf_size = 0; + } total_size -= priv->tx_buf_size; } @@ -1515,66 +1523,15 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, return 0; } -/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs - * @hdev: pointer to struct hclge_dev - * @buf_alloc: pointer to buffer calculation data - * @return: 0: calculate sucessful, negative: fail - */ -static int hclge_rx_buffer_calc(struct hclge_dev *hdev, - struct hclge_pkt_buf_alloc *buf_alloc) +static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, + struct hclge_pkt_buf_alloc *buf_alloc) { - u32 rx_all = hdev->pkt_buf_size, aligned_mps; - int no_pfc_priv_num, pfc_priv_num; - struct hclge_priv_buf *priv; + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); int i; - aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); - rx_all -= hclge_get_tx_buff_alloced(buf_alloc); - - /* When DCB is not supported, rx private - * buffer is not allocated. 
- */ - if (!hnae3_dev_dcb_supported(hdev)) { - if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return -ENOMEM; - - return 0; - } - - /* step 1, try to alloc private buffer for all enabled tc */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &buf_alloc->priv_buf[i]; - if (hdev->hw_tc_map & BIT(i)) { - priv->enable = 1; - if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = aligned_mps; - priv->wl.high = - roundup(priv->wl.low + aligned_mps, - HCLGE_BUF_SIZE_UNIT); - priv->buf_size = priv->wl.high + - hdev->dv_buf_size; - } else { - priv->wl.low = 0; - priv->wl.high = 2 * aligned_mps; - priv->buf_size = priv->wl.high + - hdev->dv_buf_size; - } - } else { - priv->enable = 0; - priv->wl.low = 0; - priv->wl.high = 0; - priv->buf_size = 0; - } - } - - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; - - /* step 2, try to decrease the buffer size of - * no pfc TC's private buffer - */ - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; priv->enable = 0; priv->wl.low = 0; @@ -1587,28 +1544,30 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = 256; - priv->wl.high = priv->wl.low + aligned_mps; - priv->buf_size = priv->wl.high + hdev->dv_buf_size; + priv->wl.low = max ? aligned_mps : 256; + priv->wl.high = roundup(priv->wl.low + aligned_mps, + HCLGE_BUF_SIZE_UNIT); } else { priv->wl.low = 0; - priv->wl.high = aligned_mps; - priv->buf_size = priv->wl.high + hdev->dv_buf_size; + priv->wl.high = max ? (aligned_mps * 2) : aligned_mps; } + + priv->buf_size = priv->wl.high + hdev->dv_buf_size; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} - /* step 3, try to reduce the number of pfc disabled TCs, - * which have private buffer - */ - /* get the total no pfc enable TC number, which have private buffer */ - no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); +static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); + int i; /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i) && !(hdev->tm_info.hw_pfc_map & BIT(i))) { @@ -1625,17 +1584,19 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, break; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} - /* step 4, try to reduce the number of pfc enabled TCs - * which have private buffer. 
-	 */
-	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
+static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
+					struct hclge_pkt_buf_alloc *buf_alloc)
+{
+	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
+	int i;
 
 	/* let the last to be cleared first */
 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
-		priv = &buf_alloc->priv_buf[i];
+		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
 
 		if (hdev->hw_tc_map & BIT(i) &&
 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
@@ -1651,7 +1612,40 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 			pfc_priv_num == 0)
 			break;
 	}
-	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
+
+	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
+
+/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
+ * @hdev: pointer to struct hclge_dev
+ * @buf_alloc: pointer to buffer calculation data
+ * @return: 0: calculate successful, negative: fail
+ */
+static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
+				struct hclge_pkt_buf_alloc *buf_alloc)
+{
+	/* When DCB is not supported, rx private buffer is not allocated. */
+	if (!hnae3_dev_dcb_supported(hdev)) {
+		u32 rx_all = hdev->pkt_buf_size;
+
+		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
+		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
+			return -ENOMEM;
+
+		return 0;
+	}
+
+	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
+		return 0;
+
+	/* try to decrease the buffer size */
+	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
+		return 0;
+
+	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
+		return 0;
+
+	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
 		return 0;
 
 	return -ENOMEM;
@@ -5186,12 +5180,14 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
 				      &fs->m_u.usr_ip6_spec);
 		break;
-	case ETHER_FLOW:
+	/* The flow type of fd rule has been checked before being added to the
+	 * rule list.
As other flow types have been handled, it must be ETHER_FLOW + * for the default case + */ + default: hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, &fs->m_u.ether_spec); break; - default: - return -EOPNOTSUPP; } hclge_fd_get_ext_info(fs, rule); @@ -5596,7 +5592,6 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, enum hclge_mac_vlan_tbl_opcode op) { struct hclge_dev *hdev = vport->back; - int return_status = -EIO; if (cmdq_resp) { dev_err(&hdev->pdev->dev, @@ -5607,52 +5602,52 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (op == HCLGE_MAC_VLAN_ADD) { if ((!resp_code) || (resp_code == 1)) { - return_status = 0; + return 0; } else if (resp_code == 2) { - return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for uc_overflow.\n"); + return -ENOSPC; } else if (resp_code == 3) { - return_status = -ENOSPC; dev_err(&hdev->pdev->dev, "add mac addr failed for mc_overflow.\n"); - } else { - dev_err(&hdev->pdev->dev, - "add mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOSPC; } + + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%d.\n", + resp_code); + return -EIO; } else if (op == HCLGE_MAC_VLAN_REMOVE) { if (!resp_code) { - return_status = 0; + return 0; } else if (resp_code == 1) { - return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "remove mac addr failed for miss.\n"); - } else { - dev_err(&hdev->pdev->dev, - "remove mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOENT; } + + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%d.\n", + resp_code); + return -EIO; } else if (op == HCLGE_MAC_VLAN_LKUP) { if (!resp_code) { - return_status = 0; + return 0; } else if (resp_code == 1) { - return_status = -ENOENT; dev_dbg(&hdev->pdev->dev, "lookup mac addr failed for miss.\n"); - } else { - dev_err(&hdev->pdev->dev, - "lookup mac addr failed for undefined, code=%d.\n", - resp_code); + return -ENOENT; } - } else { - return_status = -EINVAL; + dev_err(&hdev->pdev->dev, - "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", - op); + "lookup mac addr failed for undefined, code=%d.\n", + resp_code); + return -EIO; } - return return_status; + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", op); + return -EINVAL; } static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) @@ -5697,13 +5692,20 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) } static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, - const u8 *addr) + const u8 *addr, bool is_mc) { const unsigned char *mac_addr = addr; u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | (mac_addr[0]) | (mac_addr[1] << 8); u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + if (is_mc) { + hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + } + new_req->mac_addr_hi32 = cpu_to_le32(high_val); new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); } @@ -5974,14 +5976,13 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, HCLGE_MAC_EPORT_VFID_S, vport->vport_id); req.egress_port = cpu_to_le16(egress_port); - hclge_prepare_mac_addr(&req, addr); + 
hclge_prepare_mac_addr(&req, addr, false); /* Lookup the mac address in the mac_vlan table, and add * it if the entry is inexistent. Repeated unicast entry @@ -6039,9 +6040,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, false); ret = hclge_remove_mac_vlan_tbl(vport, &req); if (!ret) hclge_update_umv_space(vport, true); @@ -6073,11 +6073,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { /* This mac addr exist, update VFID for it */ @@ -6123,11 +6119,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { /* This mac addr exist, remove this handle's VFID for it */ @@ -6153,6 +6145,118 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, return status; } +void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg; + struct hclge_dev *hdev = vport->back; + struct list_head *list; + + if (!vport->vport_id) + return; + + mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL); + if (!mac_cfg) + return; + + mac_cfg->vport_id = vport->vport_id; + mac_cfg->hd_tbl_status = true; + memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &hdev->uc_mac_list : &hdev->mc_mac_list; + + mutex_lock(&hdev->vport_cfg_mutex); + list_add_tail(&mac_cfg->node, list); + mutex_unlock(&hdev->vport_cfg_mutex); +} + +void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + bool is_write_tbl, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct hclge_dev *hdev = vport->back; + struct list_head *list; + bool uc_flag, mc_flag; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
+		&hdev->uc_mac_list : &hdev->mc_mac_list;
+
+	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
+	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
+
+	mutex_lock(&hdev->vport_cfg_mutex);
+	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+		if (mac_cfg->vport_id == vport->vport_id &&
+		    ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
+			if (uc_flag && mac_cfg->hd_tbl_status)
+				hclge_rm_uc_addr_common(vport, mac_addr);
+
+			if (mc_flag && mac_cfg->hd_tbl_status)
+				hclge_rm_mc_addr_common(vport, mac_addr);
+
+			list_del(&mac_cfg->node);
+			kfree(mac_cfg);
+			break;
+		}
+	}
+
+	mutex_unlock(&hdev->vport_cfg_mutex);
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+				  enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
+	struct hclge_dev *hdev = vport->back;
+	struct list_head *list;
+
+	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+		&hdev->uc_mac_list : &hdev->mc_mac_list;
+
+	mutex_lock(&hdev->vport_cfg_mutex);
+	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+		if (mac_cfg->vport_id == vport->vport_id) {
+			if (mac_type == HCLGE_MAC_ADDR_UC &&
+			    mac_cfg->hd_tbl_status)
+				hclge_rm_uc_addr_common(vport,
+							mac_cfg->mac_addr);
+
+			if (mac_type == HCLGE_MAC_ADDR_MC &&
+			    mac_cfg->hd_tbl_status)
+				hclge_rm_mc_addr_common(vport,
+							mac_cfg->mac_addr);
+
+			mac_cfg->hd_tbl_status = false;
+			if (is_del_list) {
+				list_del(&mac_cfg->node);
+				kfree(mac_cfg);
+			}
+		}
+	}
+
+	mutex_unlock(&hdev->vport_cfg_mutex);
+}
+
+static void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+{
+	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
+
+	mutex_lock(&hdev->vport_cfg_mutex);
+	list_for_each_entry_safe(mac_cfg, tmp, &hdev->uc_mac_list, node) {
+		list_del(&mac_cfg->node);
+		kfree(mac_cfg);
+	}
+
+	list_for_each_entry_safe(mac_cfg, tmp, &hdev->mc_mac_list, node) {
+		list_del(&mac_cfg->node);
+		kfree(mac_cfg);
+	}
+
+	mutex_unlock(&hdev->vport_cfg_mutex);
+}
+
 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
 					      u16 cmdq_resp, u8 resp_code)
 {
@@ -6517,23 +6621,29 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
+	bool writen_to_tbl = false;
+	int ret = 0;
 
-	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
-					0, is_kill);
-}
-
-static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
-				    u16 vlan, u8 qos, __be16 proto)
-{
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
-
-	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
-		return -EINVAL;
-	if (proto != htons(ETH_P_8021Q))
-		return -EPROTONOSUPPORT;
+	/* When port base vlan is enabled, we use the port base vlan as the
+	 * vlan filter condition. In this case, we don't update the vlan
+	 * filter table when the user adds a new vlan or removes an existing
+	 * vlan; we just update the vport vlan list. The vlan ids in the vlan
+	 * list will not be written to the vlan filter table until port base
+	 * vlan is disabled.
+	 */
+	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
+					       vlan_id, 0, is_kill);
+		writen_to_tbl = true;
+	}
 
-	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
+	if (!ret) {
+		if (is_kill)
+			hclge_rm_vport_vlan_table(vport, vlan_id, false);
+		else
+			hclge_add_vport_vlan_table(vport, vlan_id,
+						   writen_to_tbl);
+	}
+	return ret;
 }
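/* A sketch of the life cycle this hunk gives a vlan that is added while
 * port base vlan is enabled (function names are the ones introduced by
 * this patch; the flow is summarised from the hunks in this series, not
 * quoted from them):
 *
 *	ndo_vlan_rx_add_vid
 *	  -> hclge_set_vlan_filter()             hardware left untouched
 *	       -> hclge_add_vport_vlan_table()   entry parked in vlan_list
 *						 with hd_tbl_status = false
 *	... port base vlan is later disabled (vf vlan set back to 0) ...
 *	  -> hclge_update_port_base_vlan_cfg(HNAE3_PORT_BASE_VLAN_DISABLE)
 *	       -> hclge_update_vlan_filter_entries()
 *	            -> hclge_add_vport_all_vlan_table()
 *	                 -> hclge_set_vlan_filter_hw() for each parked
 *	                    entry, which then gets hd_tbl_status = true
 */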
 
 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
@@ -6572,7 +6682,6 @@
 		dev_err(&hdev->pdev->dev,
 			"Send port txvlan cfg command fail, ret =%d\n",
 			status);
-
 	return status;
 }
 
@@ -6605,10 +6714,54 @@
 		dev_err(&hdev->pdev->dev,
 			"Send port rxvlan cfg command fail, ret =%d\n",
 			status);
-
 	return status;
 }
 
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
+				  u16 port_base_vlan_state,
+				  u16 vlan_tag)
+{
+	int ret;
+
+	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		vport->txvlan_cfg.accept_tag1 = true;
+		vport->txvlan_cfg.insert_tag1_en = false;
+		vport->txvlan_cfg.default_tag1 = 0;
+	} else {
+		vport->txvlan_cfg.accept_tag1 = false;
+		vport->txvlan_cfg.insert_tag1_en = true;
+		vport->txvlan_cfg.default_tag1 = vlan_tag;
+	}
+
+	vport->txvlan_cfg.accept_untag1 = true;
+	/* accept_tag2 and accept_untag2 are not supported on
+	 * pdev revision(0x20); newer revisions support them.
+	 * These two fields cannot be configured by the user.
+	 */
+	vport->txvlan_cfg.accept_tag2 = true;
+	vport->txvlan_cfg.accept_untag2 = true;
+	vport->txvlan_cfg.insert_tag2_en = false;
+	vport->txvlan_cfg.default_tag2 = 0;
+
+	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		vport->rxvlan_cfg.strip_tag1_en = false;
+		vport->rxvlan_cfg.strip_tag2_en =
+				vport->rxvlan_cfg.rx_vlan_offload_en;
+	} else {
+		vport->rxvlan_cfg.strip_tag1_en =
+				vport->rxvlan_cfg.rx_vlan_offload_en;
+		vport->rxvlan_cfg.strip_tag2_en = true;
+	}
+	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+	ret = hclge_set_vlan_tx_offload_cfg(vport);
+	if (ret)
+		return ret;
+
+	return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
 {
 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
@@ -6691,53 +6844,273 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 		return ret;
 
 	for (i = 0; i < hdev->num_alloc_vport; i++) {
-		vport = &hdev->vport[i];
-		vport->txvlan_cfg.accept_tag1 = true;
-		vport->txvlan_cfg.accept_untag1 = true;
-
-		/* accept_tag2 and accept_untag2 are not supported on
-		 * pdev revision(0x20), new revision support them. The
-		 * value of this two fields will not return error when driver
-		 * send command to fireware in revision(0x20).
-		 * This two fields can not configured by user.
- */ - vport->txvlan_cfg.accept_tag2 = true; - vport->txvlan_cfg.accept_untag2 = true; + u16 vlan_tag; - vport->txvlan_cfg.insert_tag1_en = false; - vport->txvlan_cfg.insert_tag2_en = false; - vport->txvlan_cfg.default_tag1 = 0; - vport->txvlan_cfg.default_tag2 = 0; + vport = &hdev->vport[i]; + vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - ret = hclge_set_vlan_tx_offload_cfg(vport); + ret = hclge_vlan_offload_cfg(vport, + vport->port_base_vlan_cfg.state, + vlan_tag); if (ret) return ret; + } - vport->rxvlan_cfg.strip_tag1_en = false; - vport->rxvlan_cfg.strip_tag2_en = true; - vport->rxvlan_cfg.vlan1_vlan_prionly = false; - vport->rxvlan_cfg.vlan2_vlan_prionly = false; + return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); +} - ret = hclge_set_vlan_rx_offload_cfg(vport); - if (ret) - return ret; +static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool writen_to_tbl) +{ + struct hclge_vport_vlan_cfg *vlan; + + /* vlan 0 is reserved */ + if (!vlan_id) + return; + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return; + + vlan->hd_tbl_status = writen_to_tbl; + vlan->vlan_id = vlan_id; + + list_add_tail(&vlan->node, &vport->vlan_list); +} + +static void hclge_add_vport_all_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (!vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, 0, + false); + + vlan->hd_tbl_status = true; } +} - return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); +static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + if (is_write_tbl && vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan_id, 0, + true); + + list_del(&vlan->node); + kfree(vlan); + break; + } + } +} + +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, 0, + true); + + vlan->hd_tbl_status = false; + if (is_del_list) { + list_del(&vlan->node); + kfree(vlan); + } + } +} + +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + list_del(&vlan->node); + kfree(vlan); + } + } } int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); - vport->rxvlan_cfg.strip_tag1_en = false; - vport->rxvlan_cfg.strip_tag2_en = enable; + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = enable; + } else { + vport->rxvlan_cfg.strip_tag1_en = enable; + vport->rxvlan_cfg.strip_tag2_en = true; + } vport->rxvlan_cfg.vlan1_vlan_prionly = false; vport->rxvlan_cfg.vlan2_vlan_prionly = false; + 
vport->rxvlan_cfg.rx_vlan_offload_en = enable;
 
 	return hclge_set_vlan_rx_offload_cfg(vport);
 }
 
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
+					    u16 port_base_vlan_state,
+					    struct hclge_vlan_info *new_info,
+					    struct hclge_vlan_info *old_info)
+{
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
+		hclge_rm_vport_all_vlan_table(vport, false);
+		return hclge_set_vlan_filter_hw(hdev,
+						htons(new_info->vlan_proto),
+						vport->vport_id,
+						new_info->vlan_tag,
+						new_info->qos, false);
+
+	} else {
+		ret = hclge_set_vlan_filter_hw(hdev,
+					       htons(old_info->vlan_proto),
+					       vport->vport_id,
+					       old_info->vlan_tag,
+					       old_info->qos, true);
+		if (ret)
+			return ret;
+
+		hclge_add_vport_all_vlan_table(vport);
+		return 0;
+	}
+}
+
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+				    struct hclge_vlan_info *vlan_info)
+{
+	struct hnae3_handle *nic = &vport->nic;
+	struct hclge_vlan_info *old_vlan_info;
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+	if (ret)
+		return ret;
+
+	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
+		hclge_set_vlan_filter_hw(hdev,
+					 htons(old_vlan_info->vlan_proto),
+					 vport->vport_id,
+					 old_vlan_info->vlan_tag,
+					 old_vlan_info->qos, true);
+		hclge_set_vlan_filter_hw(hdev, htons(vlan_info->vlan_proto),
+					 vport->vport_id, vlan_info->vlan_tag,
+					 vlan_info->qos, false);
+		goto update;
+	}
+
+	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+					       old_vlan_info);
+	if (ret)
+		return ret;
+
+	/* update state only when disabling/enabling port base vlan */
+	vport->port_base_vlan_cfg.state = state;
+	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+	else
+		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+update:
+	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
+	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
+	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+
+	return 0;
+}
+
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
+					  enum hnae3_port_base_vlan_state state,
+					  u16 vlan)
+{
+	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		if (!vlan)
+			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+		else
+			return HNAE3_PORT_BASE_VLAN_ENABLE;
+	} else {
+		if (!vlan)
+			return HNAE3_PORT_BASE_VLAN_DISABLE;
+		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
+			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+		else
+			return HNAE3_PORT_BASE_VLAN_MODIFY;
+	}
+}
+
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+				    u16 vlan, u8 qos, __be16 proto)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_vlan_info vlan_info;
+	u16 state;
+	int ret;
+
+	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
+		return -EINVAL;
+	if (proto != htons(ETH_P_8021Q))
+		return -EPROTONOSUPPORT;
+
+	vport = &hdev->vport[vfid];
+	state = hclge_get_port_base_vlan_state(vport,
+					       vport->port_base_vlan_cfg.state,
+					       vlan);
+	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
+		return 0;
+
+	vlan_info.vlan_tag = vlan;
+	vlan_info.qos = qos;
+	vlan_info.vlan_proto = ntohs(proto);
+
+	/* update port base vlan for pf */
+	if (!vfid) {
+		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
+		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+
+
return ret; + } + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + return hclge_update_port_base_vlan_cfg(vport, state, + &vlan_info); + } else { + ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + (u8)vfid, state, + vlan, qos, + ntohs(proto)); + return ret; + } +} + static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) { struct hclge_config_max_frm_size_cmd *req; @@ -7173,12 +7546,13 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, *tp_mdix = ETH_TP_MDI; } -static int hclge_init_nic_client_instance(struct hnae3_client *client, - struct hclge_dev *hdev, +static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, struct hclge_vport *vport) { + struct hnae3_client *client = vport->nic.client; + struct hclge_dev *hdev = ae_dev->priv; int rst_cnt = hdev->reset_count; - int ret = 0; + int ret; ret = client->ops->init_instance(&vport->nic); if (ret) @@ -7191,10 +7565,36 @@ static int hclge_init_nic_client_instance(struct hnae3_client *client, &hdev->state); client->ops->uninit_instance(&vport->nic, 0); - ret = -EBUSY; + return -EBUSY; } - return ret; + hnae3_set_client_init_flag(client, ae_dev, 1); + + return 0; +} + +static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, + struct hclge_vport *vport) +{ + struct hnae3_client *client = vport->roce.client; + struct hclge_dev *hdev = ae_dev->priv; + int ret; + + if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || + !hdev->nic_client) + return 0; + + ret = hclge_init_roce_base_info(vport); + if (ret) + return ret; + + ret = client->ops->init_instance(&vport->roce); + if (ret) + return ret; + + hnae3_set_client_init_flag(client, ae_dev, 1); + + return 0; } static int hclge_init_client_instance(struct hnae3_client *client, @@ -7211,28 +7611,13 @@ static int hclge_init_client_instance(struct hnae3_client *client, case HNAE3_CLIENT_KNIC: hdev->nic_client = client; vport->nic.client = client; - ret = hclge_init_nic_client_instance(client, hdev, - vport); + ret = hclge_init_nic_client_instance(ae_dev, vport); if (ret) goto clear_nic; - hnae3_set_client_init_flag(client, ae_dev, 1); - - if (hdev->roce_client && - hnae3_dev_roce_supported(hdev)) { - struct hnae3_client *rc = hdev->roce_client; - - ret = hclge_init_roce_base_info(vport); - if (ret) - goto clear_roce; - - ret = rc->ops->init_instance(&vport->roce); - if (ret) - goto clear_roce; - - hnae3_set_client_init_flag(hdev->roce_client, - ae_dev, 1); - } + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; break; case HNAE3_CLIENT_UNIC: @@ -7252,17 +7637,9 @@ static int hclge_init_client_instance(struct hnae3_client *client, vport->roce.client = client; } - if (hdev->roce_client && hdev->nic_client) { - ret = hclge_init_roce_base_info(vport); - if (ret) - goto clear_roce; - - ret = client->ops->init_instance(&vport->roce); - if (ret) - goto clear_roce; - - hnae3_set_client_init_flag(client, ae_dev, 1); - } + ret = hclge_init_roce_client_instance(ae_dev, vport); + if (ret) + goto clear_roce; break; default: @@ -7445,6 +7822,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; mutex_init(&hdev->vport_lock); + mutex_init(&hdev->vport_cfg_mutex); + INIT_LIST_HEAD(&hdev->uc_mac_list); + INIT_LIST_HEAD(&hdev->mc_mac_list); ret = hclge_pci_init(hdev); if (ret) { @@ -7583,6 +7963,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + ret = hclge_clear_all_ras_errors(hdev); + if (ret) { + 
dev_err(&pdev->dev,
+			"fail(%d) to clear all ras states\n", ret);
+		goto err_mdiobus_unreg;
+	}
+
 	hclge_dcb_ops_set(hdev);
 
 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -7716,6 +8103,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	ret = hclge_clear_all_ras_errors(hdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"fail(%d) to clear all ras states\n", ret);
+		return ret;
+	}
+
 	hclge_reset_vport_state(hdev);
 
 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
@@ -7745,6 +8139,9 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_misc_irq_uninit(hdev);
 	hclge_pci_uninit(hdev);
 	mutex_destroy(&hdev->vport_lock);
+	hclge_uninit_vport_mac_table(hdev);
+	hclge_uninit_vport_vlan_table(hdev);
+	mutex_destroy(&hdev->vport_cfg_mutex);
 	ae_dev->priv = NULL;
 }
 
@@ -7982,7 +8379,7 @@ static int hclge_get_regs_len(struct hnae3_handle *handle)
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Get register number failed, ret = %d.\n", ret);
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
@@ -8077,7 +8474,7 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
 	}
 }
 
-static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 23c47fea3c2f..208ba6bb632a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -579,6 +579,24 @@ struct hclge_fd_ad_data {
 	u16 rule_id;
 };
 
+struct hclge_vport_mac_addr_cfg {
+	struct list_head node;
+	int vport_id;
+	int hd_tbl_status;
+	u8 mac_addr[ETH_ALEN];
+};
+
+enum HCLGE_MAC_ADDR_TYPE {
+	HCLGE_MAC_ADDR_UC,
+	HCLGE_MAC_ADDR_MC
+};
+
+struct hclge_vport_vlan_cfg {
+	struct list_head node;
+	int hd_tbl_status;
+	u16 vlan_id;
+};
+
 /* For each bit of TCAM entry, it uses a pair of 'x' and
  * 'y' to indicate which value to match, like below:
  * ----------------------------------
@@ -719,6 +737,10 @@ struct hclge_dev {
 	/* unicast mac vlan space shared by PF and its VFs */
 	u16 share_umv_size;
 	struct mutex umv_mutex; /* protect share_umv_size */
+
+	struct mutex vport_cfg_mutex;	/* Protect stored vf table */
+	struct list_head uc_mac_list;	/* Store VF unicast table */
+	struct list_head mc_mac_list;	/* Store VF multicast table */
 };
 
 /* VPort level vlan tag configuration for TX direction */
@@ -735,10 +757,11 @@ struct hclge_tx_vtag_cfg {
 
 /* VPort level vlan tag configuration for RX direction */
 struct hclge_rx_vtag_cfg {
-	bool strip_tag1_en;	/* Whether strip inner vlan tag */
-	bool strip_tag2_en;	/* Whether strip outer vlan tag */
-	bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
-	bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
+	u8 rx_vlan_offload_en;	/* Whether enable rx vlan offload */
+	u8 strip_tag1_en;	/* Whether strip inner vlan tag */
+	u8 strip_tag2_en;	/* Whether strip outer vlan tag */
+	u8 vlan1_vlan_prionly;	/* Inner VLAN Tag up to descriptor Enable */
+	u8 vlan2_vlan_prionly;	/* Outer VLAN Tag up to descriptor Enable */
 };
 
 struct hclge_rss_tuple_cfg {
@@ -757,6 +780,17 @@ enum HCLGE_VPORT_STATE {
 	HCLGE_VPORT_STATE_MAX
 };
 
+struct hclge_vlan_info {
+	u16 vlan_proto;	/* so far, only 802.1Q is supported */
+	u16 qos;
+	u16 vlan_tag;
+};
+
+struct hclge_port_base_vlan_config { + u16 state; + struct hclge_vlan_info vlan_info; +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ @@ -773,6 +807,8 @@ struct hclge_vport { u16 bw_limit; /* VSI BW Limit (0 = disabled) */ u8 dwrr; + struct list_head vlan_list; /* Store VF vlan table */ + struct hclge_port_base_vlan_config port_base_vlan_cfg; struct hclge_tx_vtag_cfg txvlan_cfg; struct hclge_rx_vtag_cfg rxvlan_cfg; @@ -839,4 +875,17 @@ int hclge_vport_start(struct hclge_vport *vport); void hclge_vport_stop(struct hclge_vport *vport); int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu); u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); +void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + bool is_write_tbl, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info); +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, u16 vlan_tag, u16 qos, + u16 vlan_proto); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 97acb1f4d25d..e0fddfd8542a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -224,12 +224,24 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, hclge_rm_uc_addr_common(vport, old_addr); status = hclge_add_uc_addr_common(vport, mac_addr); - if (status) + if (status) { hclge_add_uc_addr_common(vport, old_addr); + } else { + hclge_rm_vport_mac_table(vport, old_addr, + false, HCLGE_MAC_ADDR_UC); + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_UC); + } } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { status = hclge_add_uc_addr_common(vport, mac_addr); + if (!status) + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_UC); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { status = hclge_rm_uc_addr_common(vport, mac_addr); + if (!status) + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_UC); } else { dev_err(&hdev->pdev->dev, "failed to set unicast mac addr, unknown subcode %d\n", @@ -255,8 +267,14 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { status = hclge_add_mc_addr_common(vport, mac_addr); + if (!status) + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_MC); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { status = hclge_rm_mc_addr_common(vport, mac_addr); + if (!status) + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_MC); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", @@ -271,9 +289,24 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, return 0; } +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, u16 vlan_tag, u16 qos, + u16 vlan_proto) +{ +#define MSG_DATA_SIZE 8 + u8 msg_data[MSG_DATA_SIZE]; + + memcpy(&msg_data[0], &state, sizeof(u16)); + memcpy(&msg_data[2], &vlan_tag, sizeof(u16)); + memcpy(&msg_data[4], 
&qos, sizeof(u16)); + memcpy(&msg_data[6], &vlan_proto, sizeof(u16)); + + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + HLCGE_MBX_PUSH_VLAN_INFO, vfid); +} + static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { int status = 0; @@ -292,11 +325,24 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, bool en = mbx_req->msg[2] ? true : false; status = hclge_en_hw_strip_rxvtag(handle, en); + } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) { + struct hclge_vlan_info vlan_info; + u16 state; + + memcpy(&state, &mbx_req->msg[2], sizeof(u16)); + memcpy(&vlan_info.vlan_tag, &mbx_req->msg[4], sizeof(u16)); + memcpy(&vlan_info.qos, &mbx_req->msg[6], sizeof(u16)); + memcpy(&vlan_info.vlan_proto, &mbx_req->msg[8], sizeof(u16)); + status = hclge_update_port_base_vlan_cfg(vport, state, + &vlan_info); + } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) { + u8 state; + + state = vport->port_base_vlan_cfg.state; + status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state, + sizeof(u8)); } - if (gen_resp) - status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); - return status; } @@ -369,16 +415,19 @@ static int hclge_get_link_info(struct hclge_vport *vport, { struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[8]; + u8 msg_data[10]; + u16 media_type; u8 dest_vfid; u16 duplex; /* mac.link can only be 0 or 1 */ link_status = (u16)hdev->hw.mac.link; duplex = hdev->hw.mac.duplex; + media_type = hdev->hw.mac.media_type; memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); + memcpy(&msg_data[8], &media_type, sizeof(u16)); dest_vfid = mbx_req->mbx_src_vfid; /* send this requested info to VF */ @@ -386,6 +435,29 @@ static int hclge_get_link_info(struct hclge_vport *vport, HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); } +static void hclge_get_vf_link_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_SUPPORTED 1 + struct hclge_dev *hdev = vport->back; + unsigned long advertising; + unsigned long supported; + unsigned long send_data; + u8 msg_data[10]; + u8 dest_vfid; + + advertising = hdev->hw.mac.advertising[0]; + supported = hdev->hw.mac.supported[0]; + dest_vfid = mbx_req->mbx_src_vfid; + msg_data[0] = mbx_req->msg[2]; + + send_data = msg_data[0] == HCLGE_SUPPORTED ? 
supported : advertising;
+
+	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
+	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
+}
+
 static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
 				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
@@ -443,6 +515,24 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
 	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
 }
 
+static int hclge_get_rss_key(struct hclge_vport *vport,
+			     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_RSS_MBX_RESP_LEN 8
+	u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
+	struct hclge_dev *hdev = vport->back;
+	u8 index;
+
+	index = mbx_req->msg[2];
+
+	memcpy(&resp_data[0],
+	       &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
+	       HCLGE_RSS_MBX_RESP_LEN);
+
+	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+				    HCLGE_RSS_MBX_RESP_LEN);
+}
+
 static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
 {
 	u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
@@ -514,7 +604,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 					ret);
 			break;
 		case HCLGE_MBX_SET_VLAN:
-			ret = hclge_set_vf_vlan_cfg(vport, req, false);
+			ret = hclge_set_vf_vlan_cfg(vport, req);
 			if (ret)
 				dev_err(&hdev->pdev->dev,
 					"PF failed(%d) to config VF's VLAN\n",
@@ -578,6 +668,22 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 					"PF failed(%d) to get qid for VF\n",
 					ret);
 			break;
+		case HCLGE_MBX_GET_VF_FLR_STATUS:
+			hclge_rm_vport_all_mac_table(vport, true,
+						     HCLGE_MAC_ADDR_UC);
+			hclge_rm_vport_all_mac_table(vport, true,
+						     HCLGE_MAC_ADDR_MC);
+			hclge_rm_vport_all_vlan_table(vport, true);
+			break;
+		case HCLGE_MBX_GET_RSS_KEY:
+			ret = hclge_get_rss_key(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to get rss key for VF\n",
+					ret);
+			break;
+		case HCLGE_MBX_GET_LINK_MODE:
+			hclge_get_vf_link_mode(vport, req);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"un-supported mailbox message, code = %d\n",
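/* The HCLGE_MBX_GET_RSS_KEY handler above hands the RSS hash key to the
 * VF in HCLGE_RSS_MBX_RESP_LEN (8-byte) chunks selected by msg[2]; the
 * matching VF loop is hclgevf_get_rss_hash_key() below. A sketch of the
 * size arithmetic, assuming the usual 40-byte key size for this hardware
 * (HCLGE_RSS_KEY_SIZE / HCLGEVF_RSS_KEY_SIZE):
 *
 *	msg_num = (40 + 8 - 1) / 8 = 5 mailbox round trips;
 *	chunk i copies key bytes [8 * i, 8 * i + 8), and the final chunk
 *	copies key_size - 8 * (msg_num - 1) bytes, which also works out
 *	to 8 when the key size is an exact multiple of the chunk size.
 */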
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 17342b7074d1..3ea83d1a6859 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -209,6 +209,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
 	return 0;
 }
 
+static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *nic = &hdev->nic;
+	u8 resp_msg;
+	int ret;
+
+	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+				   HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
+				   NULL, 0, true, &resp_msg, sizeof(u8));
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get port base vlan state failed %d",
+			ret);
+		return ret;
+	}
+
+	nic->port_base_vlan_state = resp_msg;
+
+	return 0;
+}
+
 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_TQPS_RSS_INFO_LEN 6
@@ -368,6 +389,21 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
 	}
 }
 
+void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_ADVERTISING 0
+#define HCLGEVF_SUPPORTED 1
+	u8 send_msg;
+	u8 resp_msg;
+
+	send_msg = HCLGEVF_ADVERTISING;
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
+			     sizeof(u8), false, &resp_msg, sizeof(u8));
+	send_msg = HCLGEVF_SUPPORTED;
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
+			     sizeof(u8), false, &resp_msg, sizeof(u8));
+}
+
 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
 {
 	struct hnae3_handle *nic = &hdev->nic;
@@ -569,12 +605,50 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
 	return status;
 }
 
+/* for revision 0x20, vf shares the same rss config with pf */
+static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_RSS_MBX_RESP_LEN 8
+
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
+	u16 msg_num, hash_key_index;
+	u8 index;
+	int ret;
+
+	msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+			HCLGEVF_RSS_MBX_RESP_LEN;
+	for (index = 0; index < msg_num; index++) {
+		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
+					   &index, sizeof(index),
+					   true, resp_msg,
+					   HCLGEVF_RSS_MBX_RESP_LEN);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"VF get rss hash key from PF failed, ret=%d",
+				ret);
+			return ret;
+		}
+
+		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
+		if (index == msg_num - 1)
+			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+			       &resp_msg[0],
+			       HCLGEVF_RSS_KEY_SIZE - hash_key_index);
+		else
+			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
+	}
+
+	return 0;
+}
+
 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
 			   u8 *hfunc)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	int i;
+	int i, ret;
 
 	if (handle->pdev->revision >= HNAE3_REVISION_ID_21) {
 		/* Get hash algorithm */
@@ -596,6 +670,16 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
 		if (key)
 			memcpy(key, rss_cfg->rss_hash_key,
 			       HCLGEVF_RSS_KEY_SIZE);
+	} else {
+		if (hfunc)
+			*hfunc = ETH_RSS_HASH_TOP;
+		if (key) {
+			ret = hclgevf_get_rss_hash_key(hdev);
+			if (ret)
+				return ret;
+			memcpy(key, rss_cfg->rss_hash_key,
+			       HCLGEVF_RSS_KEY_SIZE);
+		}
 	}
 
 	if (indir)
@@ -1650,6 +1734,8 @@ static void hclgevf_service_task(struct work_struct *work)
 	 */
 	hclgevf_request_link_info(hdev);
 
+	hclgevf_update_link_mode(hdev);
+
 	hclgevf_deferred_task_schedule(hdev);
 
 	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
@@ -1730,7 +1816,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 {
 	int ret;
 
-	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;
+	/* get current port base vlan state from PF */
+	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+	if (ret)
+		return ret;
 
 	/* get queue configuration from PF */
 	ret = hclgevf_get_queue_info(hdev);
@@ -1885,6 +1974,8 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 
 	hclgevf_request_link_info(hdev);
 
+	hclgevf_update_link_mode(hdev);
+
 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
 
 	return 0;
@@ -2067,9 +2158,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
 	hclgevf_free_vector(hdev, 0);
 }
 
-static int hclgevf_init_nic_client_instance(struct hnae3_client *client,
-					    struct hclgevf_dev *hdev)
+static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+					    struct hnae3_client *client)
 {
+	struct hclgevf_dev *hdev = ae_dev->priv;
 	int rst_cnt = hdev->reset_count;
 	int ret = 0;
 
@@ -2083,10 +2175,35 @@ static int hclgevf_init_nic_client_instance(struct hnae3_client *client,
 		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
 		client->ops->uninit_instance(&hdev->nic, 0);
-		ret = -EBUSY;
+		return -EBUSY;
 	}
 
-	return ret;
+	hnae3_set_client_init_flag(client, ae_dev, 1);
+
+	return 0;
+}
+
+static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+					     struct hnae3_client *client)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+	int ret;
+
+	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+	    !hdev->nic_client)
+		return 0;
+
+	ret = hclgevf_init_roce_base_info(hdev);
+	if (ret)
+		return ret;
+
+	ret = client->ops->init_instance(&hdev->roce);
+	if (ret)
+		return ret;
+
+	hnae3_set_client_init_flag(client, ae_dev, 1);
+
+	return 0;
 }
 
 static int hclgevf_init_client_instance(struct hnae3_client *client,
@@ -2100,25 +2217,15 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
 		hdev->nic_client = client;
 		hdev->nic.client = client;
 
-		ret = hclgevf_init_nic_client_instance(client, hdev);
+		ret = hclgevf_init_nic_client_instance(ae_dev, client);
 		if (ret)
 			goto clear_nic;
 
-		hnae3_set_client_init_flag(client, ae_dev, 1);
-
-		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
-			struct hnae3_client *rc = hdev->roce_client;
-
-			ret = hclgevf_init_roce_base_info(hdev);
-			if (ret)
-				goto clear_roce;
-			ret = rc->ops->init_instance(&hdev->roce);
-			if (ret)
-				goto clear_roce;
+		ret = hclgevf_init_roce_client_instance(ae_dev,
+							hdev->roce_client);
+		if (ret)
+			goto clear_roce;
 
-			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
-						   1);
-		}
 		break;
 	case HNAE3_CLIENT_UNIC:
 		hdev->nic_client = client;
@@ -2136,17 +2243,10 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
 			hdev->roce.client = client;
 		}
 
-		if (hdev->roce_client && hdev->nic_client) {
-			ret = hclgevf_init_roce_base_info(hdev);
-			if (ret)
-				goto clear_roce;
-
-			ret = client->ops->init_instance(&hdev->roce);
-			if (ret)
-				goto clear_roce;
-		}
+		ret = hclgevf_init_roce_client_instance(ae_dev, client);
+		if (ret)
+			goto clear_roce;
 
-		hnae3_set_client_init_flag(client, ae_dev, 1);
 		break;
 	default:
 		return -EINVAL;
@@ -2589,7 +2689,7 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 	hdev->hw.mac.duplex = duplex;
 }
 
-static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
@@ -2605,6 +2705,16 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
 	*media_type = hdev->hw.mac.media_type;
 }
 
+static void hclgevf_get_link_mode(struct hnae3_handle *handle,
+				  unsigned long *supported,
+				  unsigned long *advertising)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	*supported = hdev->hw.mac.supported;
+	*advertising = hdev->hw.mac.advertising;
+}
+
 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -2691,6 +2801,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
 	}
 }
 
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+					u8 *port_base_vlan_info, u8 data_size)
+{
+	struct hnae3_handle *nic = &hdev->nic;
+
+	rtnl_lock();
+	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	rtnl_unlock();
+
+	/* send msg to PF and wait for it to update the port base vlan info */
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+			     HCLGE_MBX_PORT_BASE_VLAN_CFG,
+			     port_base_vlan_info, data_size,
+			     false, NULL, 0);
+
+	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+	else
+		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+	rtnl_lock();
+	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+	rtnl_unlock();
+}
+
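/* The port_base_vlan_info buffer forwarded here is the same 8-byte payload
 * that hclge_push_vf_port_base_vlan_info() builds on the PF side and that
 * hclgevf_mbx_async_handler() picks up starting at &msg_q[1]. A hypothetical
 * helper view of that layout, for illustration only (the driver itself works
 * on the raw buffer; fields are u16 values memcpy'd in host order, read back
 * with le16_to_cpu on the little-endian hosts this driver targets):
 *
 *	struct port_base_vlan_msg {
 *		u16 state;		(hnae3_port_base_vlan_state)
 *		u16 vlan_tag;
 *		u16 qos;
 *		u16 vlan_proto;		(e.g. ETH_P_8021Q)
 *	};
 */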
 static const struct hnae3_ae_ops hclgevf_ops = {
 	.init_ae_dev = hclgevf_init_ae_dev,
 	.uninit_ae_dev = hclgevf_uninit_ae_dev,
@@ -2743,6 +2878,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
 	.set_mtu = hclgevf_set_mtu,
 	.get_global_queue_id = hclgevf_get_qid_global,
+	.get_link_mode = hclgevf_get_link_mode,
 };
 
 static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 232f6f869a43..f39c4b044bd7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -93,6 +93,8 @@ struct hclgevf_mac {
 	int link;
 	u8 duplex;
 	u32 speed;
+	u64 supported;
+	u64 advertising;
 };
 
 struct hclgevf_hw {
@@ -236,4 +238,7 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 				 u8 duplex);
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+					u8 *port_base_vlan_info, u8 data_size);
+
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 8ffe1b870f36..1f21cedb2f6b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -197,6 +197,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 			break;
 		case HCLGE_MBX_LINK_STAT_CHANGE:
 		case HCLGE_MBX_ASSERTING_RESET:
+		case HCLGE_MBX_LINK_STAT_MODE:
+		case HLCGE_MBX_PUSH_VLAN_INFO:
 			/* set this mbx event as pending. This is required as we
 			 * might loose interrupt event when mbx task is busy
 			 * handling. This shall be cleared when mbx task just
@@ -242,11 +244,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 {
 	enum hnae3_reset_type reset_type;
-	u16 link_status;
-	u16 *msg_q;
+	u16 link_status, state;
+	u16 *msg_q, *vlan_info;
 	u8 duplex;
 	u32 speed;
 	u32 tail;
+	u8 idx;
 
 	/* we can safely clear it now as we are at start of the async message
 	 * processing
@@ -270,11 +273,22 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			link_status = le16_to_cpu(msg_q[1]);
 			memcpy(&speed, &msg_q[2], sizeof(speed));
 			duplex = (u8)le16_to_cpu(msg_q[4]);
+			hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
 
 			/* update upper layer with new link link status */
 			hclgevf_update_link_status(hdev, link_status);
 			hclgevf_update_speed_duplex(hdev, speed, duplex);
 
+			break;
+		case HCLGE_MBX_LINK_STAT_MODE:
+			idx = (u8)le16_to_cpu(msg_q[1]);
+			if (idx)
+				memcpy(&hdev->hw.mac.supported, &msg_q[2],
+				       sizeof(unsigned long));
+			else
+				memcpy(&hdev->hw.mac.advertising, &msg_q[2],
+				       sizeof(unsigned long));
+			break;
 		case HCLGE_MBX_ASSERTING_RESET:
 			/* PF has asserted reset hence VF should go in pending
@@ -288,6 +302,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			hclgevf_reset_task_schedule(hdev);
 
 			break;
+		case HLCGE_MBX_PUSH_VLAN_INFO:
+			state = le16_to_cpu(msg_q[1]);
+			vlan_info = &msg_q[1];
+			hclgevf_update_port_base_vlan_info(hdev, state,
+							   (u8 *)vlan_info,
+							   8);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"fetched unsupported(%d) message from arq\n",
-- GitLab