diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 393ceae7f3dfaf82f213c13dc3d2e723515282b1..744b64108130580275f6c416b12679b7c089af16 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -131,6 +131,13 @@ struct vf_macvlans {
 	u8 vf_macvlan[ETH_ALEN];
 };
 
+#define IXGBE_MAX_TXD_PWR	14
+#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 00e60c5ab27fcc31b73dad0be89d152eabd8cb60..305e1e4f80b938298dfa9e5dc94c7930d7e11549 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -772,15 +772,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 	return ret;
 }
 
-#define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
 /**
  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
  * @adapter: driver private struct
@@ -6832,14 +6823,34 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	unsigned int tx_flags = 0;
 	int tso;
-	u16 count = 0;
+	u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	unsigned short f;
+#endif
 	u16 first;
-	unsigned int f;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol;
 	u8 hdr_len = 0;
 
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
 	protocol = vlan_get_protocol(skb);
 
 	if (vlan_tx_tag_present(skb)) {
@@ -6863,25 +6874,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
 	    (protocol == htons(ETH_P_FCOE)))
 		tx_flags |= IXGBE_TX_FLAGS_FCOE;
-#endif
-
-	/* four things can cause us to need a context descriptor */
-	if (skb_is_gso(skb) ||
-	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
-	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
-		count++;
-
-	count += TXD_USE_COUNT(skb_headlen(skb));
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
-	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
-		tx_ring->tx_stats.tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
-
+#endif
+
+	/* record the location of the first descriptor for this packet */
 	first = tx_ring->next_to_use;
+
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
 		/* setup tx offload for FCoE */
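
For reference, the worst-case descriptor arithmetic above can be sanity-checked outside the driver. The following is a minimal standalone C sketch, not driver code: DIV_ROUND_UP() is reimplemented as in linux/kernel.h, and PAGE_SIZE, MAX_SKB_FRAGS, and the example skb layout are assumed placeholder values that the kernel normally supplies. It mirrors the patch's PAGE_SIZE <= IXGBE_MAX_DATA_PER_TXD path (one descriptor per fragment) and the count + 3 headroom (2-descriptor gap to keep tail from touching head, plus 1 context descriptor) passed to ixgbe_maybe_stop_tx().

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* as in linux/kernel.h */

#define PAGE_SIZE		4096	/* assumed: x86 page size */
#define MAX_SKB_FRAGS		18	/* assumed: typical kernel value */

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)	/* 16 KB per descriptor */

/* Tx descriptors needed for S bytes, worst case (as in the patch) */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED		((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)

int main(void)
{
	unsigned int headlen = 1514;	/* assumed example: linear skb bytes */
	unsigned int nr_frags = 4;	/* assumed example: paged fragments */
	unsigned int count = TXD_USE_COUNT(headlen);

	/* PAGE_SIZE <= IXGBE_MAX_DATA_PER_TXD here, so one descriptor
	 * per fragment, matching the #else branch of the patch */
	count += nr_frags;

	/* + 2 desc gap to keep tail from touching head,
	 * + 1 desc for the context descriptor */
	printf("descriptors to reserve: %u\n", count + 3);
	printf("DESC_NEEDED (worst case): %d\n", DESC_NEEDED);
	return 0;
}

The switch from the open-coded shift-and-mask form of TXD_USE_COUNT() to DIV_ROUND_UP() is behavior-preserving for the same inputs; the functional change is that the ring-space check now runs before any VLAN/offload setup, so a busy ring is rejected before work is done on the skb, and DESC_NEEDED reserves two extra descriptors of headroom for the head/tail gap.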