Commit ad435ec6 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Remove tail write abstraction and add missing barrier

This change cleans up the tail writes for the ixgbe descriptor queues.  The
current implementation had me confused as I wasn't sure if it was still
making use of the surprise remove logic or not.
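
(For context, the ixgbe_write_tail() helper removed in the first hunk below is a plain writel() with no device-removal check, which is what made it look as if it might be tied to the surprise-removal handling. A removal-aware variant would have to guard the MMIO access along the lines of the sketch below; the helper name and the NULL-tail guard are illustrative assumptions, not code from this patch or the driver.)

	/* Illustrative sketch only: what a surprise-removal aware tail write
	 * would have to do.  The removed ixgbe_write_tail() performs no such
	 * check.
	 */
	static inline void example_write_tail(struct ixgbe_ring *ring, u32 value)
	{
		/* hypothetical guard: skip the MMIO write if the tail register
		 * mapping has been torn down (e.g. after a surprise removal)
		 */
		if (unlikely(!ring->tail))
			return;

		writel(value, ring->tail);
	}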

It also adds the mmiowb which is needed on ia64, mips, and a couple other
architectures in order to synchronize the MMIO writes with the Tx queue
_xmit_lock spinlock.
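
The ordering problem is the classic one: the spinlock orders the CPUs' accesses to normal memory, but on those architectures it does not by itself order their MMIO writes, so tail writes issued under the lock by two CPUs can reach the device out of lock order unless mmiowb() is executed before the lock is released. A minimal sketch of the pattern, using an explicit lock and a made-up device structure purely for illustration (in the driver the lock is the netdev Tx queue's _xmit_lock, taken by the core before ndo_start_xmit):

	#include <linux/spinlock.h>
	#include <linux/io.h>

	struct example_dev {
		spinlock_t	lock;	/* stands in for the Tx queue's _xmit_lock */
		void __iomem	*tail;	/* MMIO tail/doorbell register */
	};

	static void example_kick(struct example_dev *dev, u32 next_to_use)
	{
		spin_lock(&dev->lock);

		/* ... fill descriptors for this CPU's packet ... */

		writel(next_to_use, dev->tail);

		/* Without this, the CPU that takes the lock next could have its
		 * tail write arrive at the device before ours even though the
		 * lock serialized the CPUs; mmiowb() orders the MMIO write
		 * ahead of the following unlock.
		 */
		mmiowb();
		spin_unlock(&dev->lock);
	}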

Cc: Don Skidmore <donald.c.skidmore@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent 18cb652a
@@ -553,11 +553,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
-static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
-{
-	writel(value, ring->tail);
-}
-
 #define IXGBE_RX_DESC(R, i) \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 #define IXGBE_TX_DESC(R, i) \
@@ -1416,22 +1416,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
-	rx_ring->next_to_use = val;
-
-	/* update next to alloc since we have filled the ring */
-	rx_ring->next_to_alloc = val;
-
-	/*
-	 * Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-	ixgbe_write_tail(rx_ring, val);
-}
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 				    struct ixgbe_rx_buffer *bi)
 {
@@ -1517,8 +1501,20 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 	i += rx_ring->count;
 
-	if (rx_ring->next_to_use != i)
-		ixgbe_release_rx_desc(rx_ring, i);
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+
+		/* update next to alloc since we have filled the ring */
+		rx_ring->next_to_alloc = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
 }
 
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
@@ -6954,8 +6950,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
-		/* notify HW of packet */
-		ixgbe_write_tail(tx_ring, i);
+		writel(i, tx_ring->tail);
+
+		/* we need this if more than one processor can write to our tail
+		 * at a time, it synchronizes IO on IA64/Altix systems
+		 */
+		mmiowb();
 	}
 
 	return;
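
Taken together, the patch relies on two different barriers for two different orderings: the wmb() in the Rx refill path orders the CPU's writes to the descriptor ring (normal, DMA-coherent memory) ahead of the MMIO tail write, while the new mmiowb() in the Tx path orders the MMIO tail write itself ahead of the subsequent _xmit_lock release, so that doorbells from different CPUs reach the device in lock order. A condensed, hypothetical doorbell helper annotating both (the function name is a placeholder, not something in the driver):

	static void example_ring_doorbell(struct ixgbe_ring *ring, u32 next_to_use)
	{
		/* descriptor contents were written to coherent DMA memory by
		 * the caller before this point
		 */

		/* 1) make those descriptor writes visible to the device before
		 *    it can observe the new tail value (weakly ordered archs)
		 */
		wmb();

		writel(next_to_use, ring->tail);	/* MMIO doorbell */

		/* 2) keep the doorbell ordered against the caller dropping the
		 *    queue lock, so another CPU's doorbell cannot overtake it
		 *    (needed on ia64, mips and a few others)
		 */
		mmiowb();
	}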