Commit f990b79b authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Let the Rx buffer allocation clear status bits instead of cleanup

This change makes it so that we always clear the status/error bits in the
Rx descriptor in the allocation path instead of the cleanup path.  The
advantage to this is that we spend less time modifying data.  As such we
can modify the data once and then let it go cold in the cache instead of
writing it, reading it, and then writing it again.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent 1d2024f6
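The idea described in the commit message can be seen in miniature in the following standalone C sketch: because the advanced Rx descriptor is a union, zeroing the read-format header-address field at refill time also clears the write-back status/error bits, so the cleanup path no longer has to write the descriptor again after processing it. The sketch is illustrative only and uses made-up names (union rx_desc, DD_BIT, refill, clean); it is not the driver code, which follows in the diff below.

/*
 * Standalone sketch (not the driver code): zeroing the read-format
 * header-address field at refill time also clears the write-back status
 * bits, because the two formats share storage in a union. The cleanup
 * path can then test the DD bit without writing the descriptor again.
 * All names here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4
#define DD_BIT 0x1ULL			/* "descriptor done" status bit (illustrative) */

union rx_desc {				/* stand-in for the hardware descriptor union */
	struct { uint64_t pkt_addr; uint64_t hdr_addr; } read;	 /* written by software */
	struct { uint64_t rss_info; uint64_t status_error; } wb; /* written back by hardware */
};

static union rx_desc ring[RING_SIZE];

/* Refill path: hand the buffer to hardware and clear the status in the same pass. */
static void refill(unsigned int i, uint64_t dma)
{
	ring[i].read.pkt_addr = dma;
	ring[i].read.hdr_addr = 0;	/* overlays wb.status_error, so DD is cleared here */
}

/* Cleanup path: only reads the status bits; no second write is needed. */
static int clean(unsigned int i)
{
	if (!(ring[i].wb.status_error & DD_BIT))
		return 0;		/* hardware has not written this descriptor back */
	printf("descriptor %u done\n", i);
	return 1;
}

int main(void)
{
	refill(0, 0x1000);
	ring[0].wb.status_error |= DD_BIT;	/* pretend hardware completed it */
	clean(0);
	refill(0, 0x2000);			/* status cleared at refill, not in clean() */
	return 0;
}

In the diff, this corresponds to ixgbe_alloc_rx_buffers() writing rx_desc->read.hdr_addr = 0 for the next_to_use descriptor, and to the removal of rx_desc->wb.upper.status_error = 0 from the cleanup loop in ixgbe_clean_rx_irq().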
@@ -1101,91 +1101,124 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 	writel(val, rx_ring->tail);
 }
 
-/**
- * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
- **/
-void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
-{
-	union ixgbe_adv_rx_desc *rx_desc;
-	struct ixgbe_rx_buffer *bi;
-	struct sk_buff *skb;
-	u16 i = rx_ring->next_to_use;
-
-	/* do nothing if no valid netdev defined */
-	if (!rx_ring->netdev)
-		return;
-
-	while (cleaned_count--) {
-		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
-		bi = &rx_ring->rx_buffer_info[i];
-		skb = bi->skb;
-
-		if (!skb) {
-			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-							rx_ring->rx_buf_len);
-			if (!skb) {
-				rx_ring->rx_stats.alloc_rx_buff_failed++;
-				goto no_buffers;
-			}
-			/* initialize queue mapping */
-			skb_record_rx_queue(skb, rx_ring->queue_index);
-			bi->skb = skb;
-		}
-
-		if (!bi->dma) {
-			bi->dma = dma_map_single(rx_ring->dev,
-						 skb->data,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-				rx_ring->rx_stats.alloc_rx_buff_failed++;
-				bi->dma = 0;
-				goto no_buffers;
-			}
-		}
-
-		if (ring_is_ps_enabled(rx_ring)) {
-			if (!bi->page) {
-				bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
-				if (!bi->page) {
-					rx_ring->rx_stats.alloc_rx_page_failed++;
-					goto no_buffers;
-				}
-			}
-
-			if (!bi->page_dma) {
-				/* use a half page if we're re-using */
-				bi->page_offset ^= PAGE_SIZE / 2;
-				bi->page_dma = dma_map_page(rx_ring->dev,
-							    bi->page,
-							    bi->page_offset,
-							    PAGE_SIZE / 2,
-							    DMA_FROM_DEVICE);
-				if (dma_mapping_error(rx_ring->dev,
-						      bi->page_dma)) {
-					rx_ring->rx_stats.alloc_rx_page_failed++;
-					bi->page_dma = 0;
-					goto no_buffers;
-				}
-			}
-
-			/* Refresh the desc even if buffer_addrs didn't change
-			 * because each write-back erases this info. */
-			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-		} else {
-			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-			rx_desc->read.hdr_addr = 0;
-		}
-
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
-	}
-
-no_buffers:
+static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring,
+				   struct ixgbe_rx_buffer *bi)
+{
+	struct sk_buff *skb = bi->skb;
+	dma_addr_t dma = bi->dma;
+
+	if (dma)
+		return true;
+
+	if (likely(!skb)) {
+		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+						rx_ring->rx_buf_len);
+		bi->skb = skb;
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			return false;
+		}
+
+		/* initialize skb for ring */
+		skb_record_rx_queue(skb, rx_ring->queue_index);
+	}
+
+	dma = dma_map_single(rx_ring->dev, skb->data,
+			     rx_ring->rx_buf_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	return true;
+}
+
+static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
+				    struct ixgbe_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t page_dma = bi->page_dma;
+	unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+
+	if (page_dma)
+		return true;
+
+	if (!page) {
+		page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		bi->page = page;
+		if (unlikely(!page)) {
+			rx_ring->rx_stats.alloc_rx_page_failed++;
+			return false;
+		}
+	}
+
+	page_dma = dma_map_page(rx_ring->dev, page,
+				page_offset, PAGE_SIZE / 2,
+				DMA_FROM_DEVICE);
+	if (dma_mapping_error(rx_ring->dev, page_dma)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	bi->page_dma = page_dma;
+	bi->page_offset = page_offset;
+	return true;
+}
+
+/**
+ * ixgbe_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
+{
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbe_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+
+	/* nothing to do or no valid netdev defined */
+	if (!cleaned_count || !rx_ring->netdev)
+		return;
+
+	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+	bi = &rx_ring->rx_buffer_info[i];
+	i -= rx_ring->count;
+
+	while (cleaned_count--) {
+		if (!ixgbe_alloc_mapped_skb(rx_ring, bi))
+			break;
+
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info. */
+		if (ring_is_ps_enabled(rx_ring)) {
+			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+
+			if (!ixgbe_alloc_mapped_page(rx_ring, bi))
+				break;
+
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+		} else {
+			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+		}
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the hdr_addr for the next_to_use descriptor */
+		rx_desc->read.hdr_addr = 0;
+	}
+
+	i += rx_ring->count;
+
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
 		ixgbe_release_rx_desc(rx_ring, i);
@@ -1593,8 +1626,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		budget--;
 next_desc:
-		rx_desc->wb.upper.status_error = 0;
-
 		if (!budget)
 			break;