diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 33c35d3b7420fa9ae545aea4ebd5160036914718..aca9cef50d81a521f21aa74a93ebaebd0a27d793 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
 enum i40e_fd_stat_idx {
 	I40E_FD_STAT_ATR,
 	I40E_FD_STAT_SB,
+	I40E_FD_STAT_ATR_TUNNEL,
 	I40E_FD_STAT_PF_COUNT
 };
 #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
 	(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
 #define I40E_FD_SB_STAT_IDX(pf_id) \
 	(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+	(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
 
 struct i40e_fdir_filter {
 	struct hlist_node fdir_node;
@@ -263,8 +266,6 @@ struct i40e_pf {
 	struct hlist_head fdir_filter_list;
 	u16 fdir_pf_active_filters;
-	u16 fd_sb_cnt_idx;
-	u16 fd_atr_cnt_idx;
 	unsigned long fd_flush_timestamp;
 	u32 fd_flush_cnt;
 	u32 fd_add_err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4cbaaeb902c47737274010d2070d6fb95c5637ce..9a68c65b17ea03bd00642aab5fe3b2e5a5066765 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
 	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+	I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
 	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 
 	/* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
 	return *data;
 }
 
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+	struct i40e_vf *vfs = pf->vf;
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
+			return true;
+	return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
 			   struct ethtool_test *eth_test, u64 *data)
 {
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
 		netif_info(pf, drv, netdev, "offline testing starting\n");
 
 		set_bit(__I40E_TESTING, &pf->state);
+
+		if (i40e_active_vfs(pf)) {
+			dev_warn(&pf->pdev->dev,
+				 "Please take active VFs offline and restart the adapter before running NIC diagnostics\n");
+			data[I40E_ETH_TEST_REG] = 1;
+			data[I40E_ETH_TEST_EEPROM] = 1;
+			data[I40E_ETH_TEST_INTR] = 1;
+			data[I40E_ETH_TEST_LOOPBACK] = 1;
+			data[I40E_ETH_TEST_LINK] = 1;
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+			clear_bit(__I40E_TESTING, &pf->state);
+			goto skip_ol_tests;
+		}
+
 		/* If the device is online then take it offline */
 		if (if_running)
 			/* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
 		data[I40E_ETH_TEST_LOOPBACK] = 0;
 	}
 
+skip_ol_tests:
+
 	netif_info(pf, drv, netdev, "testing finished\n");
 }
 
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	input->pctype = 0;
 	input->dest_vsi = vsi->id;
 	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-	input->cnt_index = pf->fd_sb_cnt_idx;
+	input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
 	input->flow_type = fsp->flow_type;
 	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
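The block of three counters per PF means the index for any PF can be recomputed on the fly, which is what lets the cached fd_sb_cnt_idx/fd_atr_cnt_idx fields in struct i40e_pf go away. A standalone sanity check of the macro arithmetic (macros copied from the i40e.h hunk above; the pf_id value is illustrative):

```c
#include <stdio.h>

enum i40e_fd_stat_idx {
	I40E_FD_STAT_ATR,
	I40E_FD_STAT_SB,
	I40E_FD_STAT_ATR_TUNNEL,
	I40E_FD_STAT_PF_COUNT
};

#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
#define I40E_FD_ATR_STAT_IDX(pf_id) \
	(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
#define I40E_FD_SB_STAT_IDX(pf_id) \
	(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
	(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)

int main(void)
{
	int pf_id = 2;	/* illustrative PF id */

	/* each PF owns a contiguous block of three counters: 6, 7, 8 */
	printf("atr=%d sb=%d atr_tunnel=%d\n",
	       I40E_FD_ATR_STAT_IDX(pf_id),
	       I40E_FD_SB_STAT_IDX(pf_id),
	       I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id));
	return 0;
}
```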
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 1803afeef23ede81ed906b5400e9f3164234a4de..c8b621e0e7cda622c5a0fa9e795a898e53886cf5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
  *
  * The FC EOF is converted to the value understood by HW for descriptor
  * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first; it already checks for all the supported valid EOF values.
  **/
 static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
 {
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
 	case FC_EOF_A:
 		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
 	default:
-		/* FIXME: still returns 0 */
-		pr_err("Unrecognized EOF %x\n", eof);
-		return 0;
+		/* Supported valid EOF values are already checked by
+		 * calling i40e_fcoe_eof_is_supported() first, so this
+		 * default case should never be hit.
+		 */
+		WARN_ON(1);
+		return -EINVAL;
 	}
 }
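Note that i40e_fcoe_ctxt_eof() still returns u32, so the -EINVAL in the default case would wrap to a large positive value if it ever escaped; the change is sound only because callers validate first. A hedged sketch of the expected call pattern (i40e_fcoe_prep_eof() is a hypothetical wrapper, not part of this patch; i40e_fcoe_eof_is_supported() is the helper named in the comment above, its exact signature assumed):

```c
/* Hypothetical caller-side pattern: reject an unsupported EOF byte
 * before conversion so the WARN_ON() default case stays unreachable.
 */
static int i40e_fcoe_prep_eof(struct sk_buff *skb, u32 *ctxt_eof)
{
	u8 eof;

	if (i40e_fcoe_fc_eof(skb, &eof))
		return -EINVAL;		/* could not extract the EOF byte */
	if (!i40e_fcoe_eof_is_supported(eof))
		return -EINVAL;		/* filter unsupported values here */
	*ctxt_eof = i40e_fcoe_ctxt_eof(eof);
	return 0;
}
```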
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a54c14491e3b6a4dbc168980dd44d399b6766487..0a3e928a2b0014b62ecbbd4d27efd1b7607816dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 
 	dcb_cfg = &hw->local_dcbx_config;
 
-	/* See if DCB enabled with PFC TC */
-	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
-	    !(dcb_cfg->pfc.pfcenable)) {
+	/* Collect Link XOFF stats when PFC is disabled */
+	if (!dcb_cfg->pfc.pfcenable) {
 		i40e_update_link_xoff_rx(pf);
 		return;
 	}
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 			   &osd->rx_jabber, &nsd->rx_jabber);
 
 	/* FDIR stats */
-	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+	i40e_stat_update32(hw,
+			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
 			   pf->stat_offsets_loaded,
 			   &osd->fd_atr_match, &nsd->fd_atr_match);
-	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+	i40e_stat_update32(hw,
+			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
 			   pf->stat_offsets_loaded,
 			   &osd->fd_sb_match, &nsd->fd_sb_match);
+	i40e_stat_update32(hw,
+			   I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+			   pf->stat_offsets_loaded,
+			   &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
 
 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
 	nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
 		pf->fd_add_err = pf->fd_atr_cnt = 0;
 		if (pf->fd_tcp_rule > 0) {
 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
 			pf->fd_tcp_rule = 0;
 		}
 		i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
 		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
 			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
-			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
 		}
 	}
 	/* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
 		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
 			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
 		}
 	}
 }
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 
 	if (!(time_after(jiffies, min_flush_time)) &&
 	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-		dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
 		disable_atr = true;
 	}
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 		if (!disable_atr)
 			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
-		dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
 	}
 }
@@ -7676,12 +7686,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-		/* Setup a counter for fd_atr per PF */
-		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
 		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-			/* Setup a counter for fd_sb per PF */
-			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
 		} else {
 			dev_info(&pf->pdev->dev,
 				 "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7771,7 +7777,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
 			pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
 			pf->fdir_pf_active_filters = 0;
 			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-			dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
 			/* if ATR was auto disabled it can be re-enabled. */
 			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
 			    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
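Every flow-director message in i40e_main.c above is now gated on I40E_DEBUG_FD in hw.debug_mask instead of being printed unconditionally. The repeated pattern could be captured in a helper; a hypothetical macro, shown only to make the idiom explicit (the driver open-codes the test in every hunk):

```c
/* hypothetical convenience wrapper -- not part of this patch */
#define i40e_fd_dbg(pf, fmt, ...)					\
	do {								\
		if (I40E_DEBUG_FD & (pf)->hw.debug_mask)		\
			dev_info(&(pf)->pdev->dev, fmt, ##__VA_ARGS__);	\
	} while (0)

/* equivalent to the open-coded hunks above: */
i40e_fd_dbg(pf, "ATR re-enabled.\n");
```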
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0b4a7be2c7d2b21f2286732218c0cbb2f2bb61b2..cc82a7ffacb06326f259ff3e8c1f0fc4ec6e9838 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
 	tx_desc->cmd_type_offset_bsz =
 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
-	/* set the timestamp */
-	tx_buf->time_stamp = jiffies;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.
 	 */
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 	if (add) {
 		pf->fd_tcp_rule++;
 		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
 			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
 		}
 	} else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 			  (pf->fd_tcp_rule - 1) : 0;
 		if (pf->fd_tcp_rule == 0) {
 			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-			dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
 		}
 	}
 
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 
 		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
 		    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
-			dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+				dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
 			pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
 		}
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 			 tx_ring->vsi->seid,
 			 tx_ring->queue_index,
 			 tx_ring->next_to_use, i);
-		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-			 "  time_stamp           <%lx>\n"
-			 "  jiffies              <%lx>\n",
-			 tx_ring->tx_bi[i].time_stamp, jiffies);
 
 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
@@ -1923,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
- * @flags:    send flags
+ * @tx_flags: send tx flags
  * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		     u32 flags, __be16 protocol)
+		     u32 tx_flags, __be16 protocol)
 {
 	struct i40e_filter_program_desc *fdir_desc;
 	struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1952,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	if (!tx_ring->atr_sample_rate)
 		return;
 
-	/* snag network header to get L4 type and address */
-	hdr.network = skb_network_header(skb);
+	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+		return;
 
-	/* Currently only IPv4/IPv6 with TCP is supported */
-	if (protocol == htons(ETH_P_IP)) {
-		if (hdr.ipv4->protocol != IPPROTO_TCP)
-			return;
+	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+		/* snag network header to get L4 type and address */
+		hdr.network = skb_network_header(skb);
 
-		/* access ihl as a u8 to avoid unaligned access on ia64 */
-		hlen = (hdr.network[0] & 0x0F) << 2;
-	} else if (protocol == htons(ETH_P_IPV6)) {
-		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+		/* Currently only IPv4/IPv6 with TCP is supported
+		 * access ihl as u8 to avoid unaligned access on ia64
+		 */
+		if (tx_flags & I40E_TX_FLAGS_IPV4)
+			hlen = (hdr.network[0] & 0x0F) << 2;
+		else if (protocol == htons(ETH_P_IPV6))
+			hlen = sizeof(struct ipv6hdr);
+		else
 			return;
-
-		hlen = sizeof(struct ipv6hdr);
 	} else {
-		return;
+		hdr.network = skb_inner_network_header(skb);
+		hlen = skb_inner_network_header_len(skb);
 	}
 
+	/* Currently only IPv4/IPv6 with TCP is supported
+	 * Note: tx_flags gets modified to reflect inner protocols in
+	 * tx_enable_csum function if encap is enabled.
+	 */
+	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+	    (hdr.ipv4->protocol != IPPROTO_TCP))
+		return;
+	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
+		return;
+
 	th = (struct tcphdr *)(hdr.network + hlen);
 
 	/* Due to lack of space, no more new filters can be programmed */
@@ -2020,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		      I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-	dtype_cmd |=
-		((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
-		I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+		dtype_cmd |=
+			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+	else
+		dtype_cmd |=
+			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
 	fdir_desc->rsvd = cpu_to_le32(0);
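The i40e_atr() rework keys off tx_flags rather than the outer ethertype, and that only works because of how the flags travel through the transmit path: i40e_xmit_frame_ring() seeds I40E_TX_FLAGS_IPV4/IPV6 from skb->protocol, then i40e_tx_enable_csum() (which now takes u32 *tx_flags) sets I40E_TX_FLAGS_VXLAN_TUNNEL for UDP encapsulation and rewrites the IPv4/IPv6 bits to describe the inner header. Condensed to its skeleton (identifiers are all from this patch; glue code elided, not compilable on its own):

```c
/* i40e_xmit_frame_ring(): seed the flags from the outer ethertype */
if (protocol == htons(ETH_P_IP))
	tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
	tx_flags |= I40E_TX_FLAGS_IPV6;

/* i40e_tx_enable_csum(): may set VXLAN_TUNNEL and flip IPV4 -> IPV6
 * so the flags describe the inner header of an encapsulated packet */
i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
		    tx_ring, &cd_tunneling);

/* i40e_atr(): the same flags now select outer vs. inner header */
hdr.network = (tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL) ?
	      skb_inner_network_header(skb) : skb_network_header(skb);
```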
@@ -2043,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 * otherwise returns 0 to indicate the flags has been set properly.
 **/
 #ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-			       struct i40e_ring *tx_ring,
-			       u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 				      struct i40e_ring *tx_ring,
 				      u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+					     struct i40e_ring *tx_ring,
+					     u32 *flags)
 #endif
 {
 	__be16 protocol = skb->protocol;
@@ -2117,16 +2130,14 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+		    u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
 	struct ipv6hdr *ipv6h;
@@ -2218,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
 * @cd_tunneling: ptr to context desc bits
 **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				u32 *td_cmd, u32 *td_offset,
 				struct i40e_ring *tx_ring,
 				u32 *cd_tunneling)
@@ -2239,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
 		default:
 			return;
@@ -2248,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		this_ipv6_hdr = inner_ipv6_hdr(skb);
 		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-		if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-			if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			if (*tx_flags & I40E_TX_FLAGS_TSO) {
 				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
 				ip_hdr(skb)->check = 0;
 			} else {
 				*cd_tunneling |=
 					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 			}
-		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-			if (tx_flags & I40E_TX_FLAGS_TSO)
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				ip_hdr(skb)->check = 0;
 		}
 
@@ -2271,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 					skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 		if (this_ip_hdr->version == 6) {
-			tx_flags &= ~I40E_TX_FLAGS_IPV4;
-			tx_flags |= I40E_TX_FLAGS_IPV6;
+			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			*tx_flags |= I40E_TX_FLAGS_IPV6;
 		}
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
@@ -2282,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 	}
 
 	/* Enable IP checksum offloads */
-	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 		l4_hdr = this_ip_hdr->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
-		if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_TSO) {
 			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
 			this_ip_hdr->check = 0;
 		} else {
@@ -2296,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		/* Now set the td_offset for IP header length */
 		*td_offset = (network_hdr_len >> 2) <<
 			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 		l4_hdr = this_ipv6_hdr->nexthdr;
 		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
 		/* Now set the td_offset for IP header length */
@@ -2394,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 * Returns 0 if stop is not needed
 **/
 #ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #endif
 {
 	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2476,13 +2487,13 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 * @td_offset: offset for checksum or crc
 **/
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		 struct i40e_tx_buffer *first, u32 tx_flags,
-		 const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			struct i40e_tx_buffer *first, u32 tx_flags,
 			const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+			       struct i40e_tx_buffer *first, u32 tx_flags,
+			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
 	unsigned int data_len = skb->data_len;
@@ -2588,9 +2599,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 						 tx_ring->queue_index),
 			     first->bytecount);
 
-	/* set the timestamp */
-	first->time_stamp = jiffies;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -2643,11 +2651,11 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 * one descriptor.
 **/
 #ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
-			       struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
 				      struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+					     struct i40e_ring *tx_ring)
 #endif
 {
 	unsigned int f;
@@ -2709,7 +2717,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 
-	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+	tso = i40e_tso(tx_ring, skb, &hdr_len,
 		       &cd_type_cmd_tso_mss, &cd_tunneling);
 
 	if (tso < 0)
@@ -2735,7 +2743,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		tx_flags |= I40E_TX_FLAGS_CSUM;
 
-		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
 				    tx_ring, &cd_tunneling);
 	}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4b0b8102cdc39c2529f49c18d6b1cbc61c48c341..0dc48dc9ca61922a4b11bd0b7624f07c153c603a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN		(u32)(1 << 8)
 #define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
-	unsigned long time_stamp;
 	union {
 		struct sk_buff *skb;
 		void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 568e855da0f3e16b29a81702766070974dd3da91..9a5a75b1e2bc053b50bec13adde2fd3aa4848595 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
 	/* flow director stats */
 	u64 fd_atr_match;
 	u64 fd_sb_match;
+	u64 fd_atr_tunnel_match;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78d1c4ff565e8853473b70c3827e6a727ff3ce1c..4653b6e653c9470da76e9b35be2bd3767da7bf5b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
 	int pre_existing_vfs = pci_num_vf(pdev);
 	int err = 0;
 
+	if (test_bit(__I40E_TESTING, &pf->state)) {
+		dev_warn(&pdev->dev,
+			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+		err = -EPERM;
+		goto err_out;
+	}
+
 	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 		i40e_free_vfs(pf);
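The ethtool and SR-IOV hunks above form a two-way exclusion: diagnostics bail out while VFs are active, and VF allocation bails out while __I40E_TESTING is set. Both sides rely on the same atomic state bit in pf->state, manipulated with the kernel's set_bit()/test_bit()/clear_bit() bitops. A standalone model of the handshake, with the kernel bitops mimicked via C11 atomics (bit value illustrative):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define __I40E_TESTING	5	/* bit index; actual value is driver-internal */

static atomic_ulong pf_state;

static bool test_bit(int nr, atomic_ulong *addr)
{
	return atomic_load(addr) & (1UL << nr);
}

static void set_bit(int nr, atomic_ulong *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

static void clear_bit(int nr, atomic_ulong *addr)
{
	atomic_fetch_and(addr, ~(1UL << nr));
}

int main(void)
{
	set_bit(__I40E_TESTING, &pf_state);		/* i40e_diag_test() enters */
	if (test_bit(__I40E_TESTING, &pf_state))	/* i40e_pci_sriov_enable() */
		printf("sriov_enable: -EPERM, diagnostics in progress\n");
	clear_bit(__I40E_TESTING, &pf_state);		/* testing finished */
	return 0;
}
```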
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 3ef23091439f72d90879acd3f820f15c71206127..ec7e220757db1e283cb385f58f0b5955f50e2b25 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 			 tx_ring->vsi->seid,
 			 tx_ring->queue_index,
 			 tx_ring->next_to_use, i);
-		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-			 "  time_stamp           <%lx>\n"
-			 "  jiffies              <%lx>\n",
-			 tx_ring->tx_bi[i].time_stamp, jiffies);
 
 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
@@ -1350,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 }
 
 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  * @flags:   the tx flags to be set
@@ -1361,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 * Returns error code indicate the frame should be dropped upon error and the
 * otherwise returns 0 to indicate the flags has been set properly.
 **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring,
-				      u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring,
+					       u32 *flags)
 {
 	__be16 protocol = skb->protocol;
 	u32  tx_flags = 0;
@@ -1406,16 +1399,14 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+		    u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
 	struct ipv6hdr *ipv6h;
@@ -1466,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
 * @cd_tunneling: ptr to context desc bits
 **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				u32 *td_cmd, u32 *td_offset,
 				struct i40e_ring *tx_ring,
 				u32 *cd_tunneling)
@@ -1487,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
 		default:
 			return;
@@ -1496,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		this_ipv6_hdr = inner_ipv6_hdr(skb);
 		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-		if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-			if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			if (*tx_flags & I40E_TX_FLAGS_TSO) {
 				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
 				ip_hdr(skb)->check = 0;
 			} else {
 				*cd_tunneling |=
 					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 			}
-		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-			if (tx_flags & I40E_TX_FLAGS_TSO)
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				ip_hdr(skb)->check = 0;
 		}
 
@@ -1519,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 					skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 		if (this_ip_hdr->version == 6) {
-			tx_flags &= ~I40E_TX_FLAGS_IPV4;
-			tx_flags |= I40E_TX_FLAGS_IPV6;
+			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			*tx_flags |= I40E_TX_FLAGS_IPV6;
 		}
 
@@ -1532,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 	}
 
 	/* Enable IP checksum offloads */
-	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 		l4_hdr = this_ip_hdr->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
-		if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_TSO) {
 			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
 			this_ip_hdr->check = 0;
 		} else {
@@ -1546,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		/* Now set the td_offset for IP header length */
 		*td_offset = (network_hdr_len >> 2) <<
 			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 		l4_hdr = this_ipv6_hdr->nexthdr;
 		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
 		/* Now set the td_offset for IP header length */
@@ -1675,7 +1666,44 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 }
 
 /**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
  * @skb:      send buffer
  * @first:    first buffer info buffer to use
@@ -1684,9 +1712,9 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
 **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-			struct i40e_tx_buffer *first, u32 tx_flags,
-			const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+				 struct i40e_tx_buffer *first, u32 tx_flags,
+				 const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
@@ -1792,9 +1820,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 						 tx_ring->queue_index),
 			     first->bytecount);
 
-	/* set the timestamp */
-	first->time_stamp = jiffies;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -1811,8 +1836,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
+	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (!skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						   tx_ring->queue_index)))
+		writel(i, tx_ring->tail);
 
 	return;
 
@@ -1834,44 +1863,7 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 }
 
 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
 *
@@ -1879,8 +1871,8 @@
 * there is not enough descriptors available in this ring since we need at least
 * one descriptor.
 **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring)
 {
 	unsigned int f;
 	int count = 0;
@@ -1895,7 +1887,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
 	count += TXD_USE_COUNT(skb_headlen(skb));
-	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+	if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
 		return 0;
 	}
@@ -1921,11 +1913,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
 	int tso;
-	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+	if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;
 
 	/* prepare the xmit flags */
-	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
 		goto out_drop;
 
 	/* obtain protocol of skb */
@@ -1940,7 +1932,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 
-	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+	tso = i40e_tso(tx_ring, skb, &hdr_len,
 		       &cd_type_cmd_tso_mss, &cd_tunneling);
 
 	if (tso < 0)
@@ -1961,17 +1953,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		tx_flags |= I40E_TX_FLAGS_CSUM;
 
-		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
 				    tx_ring, &cd_tunneling);
 	}
 
 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
 			   cd_tunneling, cd_l2tag2);
 
-	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-		    td_cmd, td_offset);
-
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+		      td_cmd, td_offset);
 
 	return NETDEV_TX_OK;
 
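The new tail-write logic in i40evf_tx_map() is standard xmit_more doorbell batching: the MMIO write is skipped while the stack promises more frames, but always performed if the queue just stopped, since no further transmit would come along to flush the posted descriptors. That is also why i40evf_maybe_stop_tx() moved from the caller to just before the doorbell decision. The skeleton of the ordering (all identifiers from the hunks above; not standalone):

```c
tx_ring->next_to_use = i;

/* may stop the queue; must run before the xmit_more check so a
 * stopping queue always gets a final doorbell */
i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);

if (!skb->xmit_more ||
    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
					   tx_ring->queue_index)))
	writel(i, tx_ring->tail);	/* notify HW of the whole batch */
```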
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1fbac1f0de59444626cc9645b72aeac0da..e7a34f899f2cbb8150495a31e0690a95e90efc1a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
-	unsigned long time_stamp;
 	union {
 		struct sk_buff *skb;
 		void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a9337944f20f934cf6217467ee61ec7303..c463ec41579c708ffbe606ea0b31ca0485ea4c58 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
 	/* flow director stats */
 	u64 fd_atr_match;
 	u64 fd_sb_match;
+	u64 fd_atr_tunnel_match;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9f6fb19062a00c43005d39e03c8037bf58f93488..9a1d0f142b091c3cecb06ce65ffac1d3c8e6bf1a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fdir_filter *input;
 	union ixgbe_atr_input mask;
+	u8 queue;
 	int err;
 
 	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 		return -EOPNOTSUPP;
 
-	/*
-	 * Don't allow programming if the action is a queue greater than
-	 * the number of online Rx queues.
+	/* ring_cookie is masked into a set of queues and ixgbe pools or
+	 * we use the drop index.
 	 */
-	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
-	    (fsp->ring_cookie >= adapter->num_rx_queues))
-		return -EINVAL;
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		queue = IXGBE_FDIR_DROP_QUEUE;
+	} else {
+		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+		if (!vf && (ring >= adapter->num_rx_queues))
+			return -EINVAL;
+		else if (vf &&
+			 ((vf > adapter->num_vfs) ||
+			   ring >= adapter->num_rx_queues_per_pool))
+			return -EINVAL;
+
+		/* Map the ring onto the absolute queue index */
+		if (!vf)
+			queue = adapter->rx_ring[ring]->reg_idx;
+		else
+			queue = ((vf - 1) *
+				adapter->num_rx_queues_per_pool) + ring;
+	}
 
 	/* Don't allow indexes to exist outside of available space */
 	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 
 	/* program filters to filter memory */
 	err = ixgbe_fdir_write_perfect_filter_82599(hw,
-				&input->filter, input->sw_idx,
-				(input->action == IXGBE_FDIR_DROP_QUEUE) ?
-				IXGBE_FDIR_DROP_QUEUE :
-				adapter->rx_ring[input->action]->reg_idx);
+				&input->filter, input->sw_idx, queue);
 	if (err)
 		goto err_out_w_lock;
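Worked example of the new ixgbe action mapping: the VF field in the cookie is 1-based (vf == 0 means the main pool, handled via reg_idx), so a rule targeting vf = 2, ring = 1 with four Rx queues per pool programs absolute queue (2 - 1) * 4 + 1 = 5. As a sketch (the per-pool queue count is an assumed example value):

```c
/* illustration only; mirrors the mapping in the hunk above */
unsigned int num_rx_queues_per_pool = 4;	/* assumed pool size */
unsigned int vf = 2, ring = 1, queue;

if (!vf)
	queue = ring;	/* real code uses adapter->rx_ring[ring]->reg_idx */
else
	queue = (vf - 1) * num_rx_queues_per_pool + ring;	/* == 5 */
```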
If a future patch consumes the next + * byte it should be aware of this possiblity. + */ +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; + /** * struct ethtool_rxnfc - command to get or set RX flow classification rules * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,