Commit f3213d93 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Update driver to make use of DMA attributes in Rx path

This patch adds support for DMA_ATTR_SKIP_CPU_SYNC and
DMA_ATTR_WEAK_ORDERING.  Enabling both of these for the Rx path yields
performance improvements on architectures that implement either one,
because page mapping and unmapping only has to sync what is actually
being used instead of the entire buffer.  In addition, enabling the weak
ordering attribute provides a performance improvement on architectures
that can associate a memory ordering with a DMA buffer, such as Sparc.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent f215af8c
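For context, the pattern the patch moves the Rx path to is sketched below. It is a minimal, hypothetical illustration of the kernel DMA-attributes API that the patch relies on (dma_map_page_attrs(), dma_sync_single_range_for_device(), dma_sync_single_range_for_cpu(), dma_unmap_page_attrs()); the DEMO_RX_DMA_ATTR macro, struct demo_rx_buf, and the demo_rx_*() helpers are invented for the example and are not part of the ixgbe driver.

/* Minimal sketch: map a receive page once with DMA_ATTR_SKIP_CPU_SYNC |
 * DMA_ATTR_WEAK_ORDERING, then explicitly sync only the region the
 * hardware actually uses.  All demo_* names are hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define DEMO_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

struct demo_rx_buf {
	struct page *page;
	dma_addr_t dma;
	unsigned int page_offset;
};

/* Map a whole page; the attributes suppress the full-buffer CPU sync that
 * a plain dma_map_page() would perform on sync-needing architectures.
 */
static int demo_rx_map(struct device *dev, struct demo_rx_buf *buf)
{
	buf->page = alloc_page(GFP_ATOMIC);
	if (!buf->page)
		return -ENOMEM;

	buf->dma = dma_map_page_attrs(dev, buf->page, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE, DEMO_RX_DMA_ATTR);
	if (dma_mapping_error(dev, buf->dma)) {
		__free_page(buf->page);
		buf->page = NULL;
		return -ENOMEM;
	}
	buf->page_offset = 0;
	return 0;
}

/* Before posting the buffer to the device, sync only the slice it will
 * DMA into, not the whole page.
 */
static void demo_rx_refill(struct device *dev, struct demo_rx_buf *buf,
			   unsigned int bufsz)
{
	dma_sync_single_range_for_device(dev, buf->dma, buf->page_offset,
					 bufsz, DMA_FROM_DEVICE);
}

/* Tear down: invalidate the slice the device may have written, then unmap
 * with the same attributes so no extra full-page sync happens at unmap time.
 */
static void demo_rx_unmap(struct device *dev, struct demo_rx_buf *buf,
			  unsigned int bufsz)
{
	dma_sync_single_range_for_cpu(dev, buf->dma, buf->page_offset,
				      bufsz, DMA_FROM_DEVICE);
	dma_unmap_page_attrs(dev, buf->dma, PAGE_SIZE,
			     DMA_FROM_DEVICE, DEMO_RX_DMA_ATTR);
	__free_page(buf->page);
	buf->page = NULL;
}

The diff below applies the same split: the dma_sync_single_range_for_device() call moves out of ixgbe_reuse_rx_page() and into ixgbe_alloc_rx_buffers(), so the buffer is synced for the device right before it is posted, and ixgbe_clean_rx_ring() gains an explicit dma_sync_single_range_for_cpu() ahead of the attribute-aware unmap.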
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -104,6 +104,9 @@
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
 
+#define IXGBE_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 enum ixgbe_tx_flags {
 	/* cmd_type flags */
 	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1570,8 +1570,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0,
-			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 ixgbe_rx_pg_size(rx_ring),
+				 DMA_FROM_DEVICE,
+				 IXGBE_RX_DMA_ATTR);
 
 	/*
 	 * if mapping failed free memory back to system since
@@ -1614,6 +1616,12 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 ixgbe_rx_bufsz(rx_ring),
+						 DMA_FROM_DEVICE);
+
 		/*
 		 * Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
@@ -1832,8 +1840,10 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 {
 	/* if the page was released unmap it, else just sync our portion */
 	if (unlikely(IXGBE_CB(skb)->page_released)) {
-		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+				     ixgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBE_RX_DMA_ATTR);
 		IXGBE_CB(skb)->page_released = false;
 	} else {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -1917,12 +1927,6 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 
 	/* transfer page from old buffer to new buffer */
 	*new_buff = *old_buff;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset,
-					 ixgbe_rx_bufsz(rx_ring),
-					 DMA_FROM_DEVICE);
 }
 
 static inline bool ixgbe_page_is_reserved(struct page *page)
@@ -2089,9 +2093,10 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
 		IXGBE_CB(skb)->page_released = true;
 	} else {
 		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       ixgbe_rx_pg_size(rx_ring),
-			       DMA_FROM_DEVICE);
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     ixgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBE_RX_DMA_ATTR);
 	}
 
 	/* clear contents of buffer_info */
@@ -4883,10 +4888,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 		if (rx_buffer->skb) {
 			struct sk_buff *skb = rx_buffer->skb;
 			if (IXGBE_CB(skb)->page_released)
-				dma_unmap_page(dev,
-					       IXGBE_CB(skb)->dma,
-					       ixgbe_rx_bufsz(rx_ring),
-					       DMA_FROM_DEVICE);
+				dma_unmap_page_attrs(dev,
+						     IXGBE_CB(skb)->dma,
+						     ixgbe_rx_pg_size(rx_ring),
+						     DMA_FROM_DEVICE,
+						     IXGBE_RX_DMA_ATTR);
 			dev_kfree_skb(skb);
 			rx_buffer->skb = NULL;
 		}
@@ -4894,8 +4900,20 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 		if (!rx_buffer->page)
 			continue;
 
-		dma_unmap_page(dev, rx_buffer->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(dev, rx_buffer->dma,
+				     ixgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBE_RX_DMA_ATTR);
+
 		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
 
 		rx_buffer->page = NULL;