Commit 24455800 authored by Steve Hodgson, committed by David S. Miller

sfc: Recycle discarded rx buffers back onto the queue

The cut-through design of the receive path means that packets that
fail to match the appropriate MAC filter are not discarded at the MAC
but are flagged in the completion event as 'to be discarded'.  On
networks with heavy multicast traffic, this can account for a
significant proportion of received packets, so it is worthwhile to
recycle the buffer immediately in this case rather than freeing it
and then reallocating it shortly after.

The only complication here is dealing with a page that is shared
between two receive buffers. In that case we must be careful to
free the DMA mapping only when both buffers have been freed by
the kernel, which means such a page can only be recycled if both
receive buffers are discarded. Unfortunately, in an environment
with a 1500-byte MTU, rx_alloc_method=PAGE, and a mixture of
discarded and not-discarded frames hitting the same receive
queue, buffer recycling won't always be possible.
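The decision table implied by the paragraph above can be captured in a small standalone C model (illustration only, not driver code; the enum names and the page_refs parameter are invented, with page_refs standing in for the kernel's page_count()):

#include <assert.h>
#include <stdio.h>

/* page_refs stands in for page_count(): 1 means only the driver still
 * references the page, >1 means the other half is still up in the stack. */
enum outcome {
	DROP_REF,	/* 1st half: release our reference, recycle later */
	FREE_BUFFER,	/* 2nd half, peer in flight: recycling impossible */
	RECYCLE_BOTH,	/* 2nd half, peer discarded: re-add both buffers */
};

static enum outcome discard_split_page(int is_second_half, int page_refs)
{
	if (!is_second_half)
		return DROP_REF;
	return page_refs == 1 ? RECYCLE_BOTH : FREE_BUFFER;
}

int main(void)
{
	assert(discard_split_page(0, 2) == DROP_REF);
	assert(discard_split_page(1, 2) == FREE_BUFFER);
	assert(discard_split_page(1, 1) == RECYCLE_BOTH);
	puts("split-page discard cases behave as described");
	return 0;
}

Only the RECYCLE_BOTH case puts buffers straight back on the ring; the other two either defer the page's fate to its buddy or fall back to freeing.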
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent f7d6f379
@@ -82,9 +82,10 @@ static unsigned int rx_refill_limit = 95;
  * RX maximum head room required.
  *
  * This must be at least 1 to prevent overflow and at least 2 to allow
- * pipelined receives.
+ * pipelined receives. Then a further 1 because efx_recycle_rx_buffer()
+ * might insert two buffers.
  */
-#define EFX_RXD_HEAD_ROOM 2
+#define EFX_RXD_HEAD_ROOM 3
 
 static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
 {
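As a rough sketch of the head-room accounting (a userspace model, not driver code; the ring size and worst-case sequence are assumptions for illustration): recycling can re-add two entries after popping only one, and that net gain of one entry is what the extra slot absorbs.

#include <assert.h>

enum { SIZE = 512, HEAD_ROOM = 3 };

int main(void)
{
	unsigned added = 0, removed = 0;

	while (added - removed < SIZE - HEAD_ROOM)	/* refill limit */
		++added;

	removed += 1;	/* one discarded completion is popped... */
	added += 2;	/* ...and recycled together with its page buddy */

	assert(added - removed < SIZE);	/* head room absorbs the net +1 */
	return 0;
}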
@@ -250,6 +251,70 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 		efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
+/* Attempt to resurrect the other receive buffer that used to share this page,
+ * which had previously been passed up to the kernel and freed. */
+static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+				    struct efx_rx_buffer *rx_buf)
+{
+	struct efx_rx_buffer *new_buf;
+	unsigned index;
+
+	/* We could have recycled the 1st half, then refilled
+	 * the queue, and now recycle the 2nd half.
+	 * EFX_RXD_HEAD_ROOM ensures that there is always room
+	 * to reinsert two buffers (once). */
+	get_page(rx_buf->page);
+
+	index = rx_queue->added_count & EFX_RXQ_MASK;
+	new_buf = efx_rx_buffer(rx_queue, index);
+	new_buf->dma_addr = rx_buf->dma_addr - (PAGE_SIZE >> 1);
+	new_buf->skb = NULL;
+	new_buf->page = rx_buf->page;
+	new_buf->data = rx_buf->data - (PAGE_SIZE >> 1);
+	new_buf->len = rx_buf->len;
+	++rx_queue->added_count;
+}
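The two buffers carved from one page sit exactly PAGE_SIZE/2 apart, which is why the function above can rebuild the first half's dma_addr and data pointers by plain subtraction, and why the slot index is just the free-running added_count masked down. A minimal standalone check (values and constants assumed for illustration, not driver code):

#include <assert.h>
#include <stdint.h>

enum { MODEL_PAGE_SIZE = 4096, RXQ_SIZE = 512, RXQ_MASK = RXQ_SIZE - 1 };

int main(void)
{
	uint64_t page_dma = 0x10000000;	/* DMA address of the whole page */
	uint64_t half2 = page_dma + (MODEL_PAGE_SIZE >> 1);	/* 2nd buffer */

	/* 1st half recovered from the 2nd by subtracting half a page */
	assert(half2 - (MODEL_PAGE_SIZE >> 1) == page_dma);

	/* free-running counter maps to a slot via the power-of-two mask */
	unsigned added_count = 1000;
	assert((added_count & RXQ_MASK) == added_count % RXQ_SIZE);
	return 0;
}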
+/* Recycle the given rx buffer directly back into the rx_queue. There is
+ * always room to add this buffer, because we've just popped a buffer. */
+static void efx_recycle_rx_buffer(struct efx_channel *channel,
+				  struct efx_rx_buffer *rx_buf)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+	struct efx_rx_buffer *new_buf;
+	unsigned index;
+
+	if (rx_buf->page != NULL && efx->rx_buffer_len <= (PAGE_SIZE >> 1)) {
+		if (efx_rx_buf_offset(rx_buf) & (PAGE_SIZE >> 1)) {
+			/* This is the 2nd half of a page split between two
+			 * buffers. If page_count() is > 1 then the kernel
+			 * is holding onto the previous buffer */
+			if (page_count(rx_buf->page) != 1) {
+				efx_fini_rx_buffer(rx_queue, rx_buf);
+				return;
+			}
+
+			efx_resurrect_rx_buffer(rx_queue, rx_buf);
+		} else {
+			/* Free the 1st buffer's reference on the page. If the
+			 * 2nd buffer is also discarded, this buffer will be
+			 * resurrected above */
+			put_page(rx_buf->page);
+			rx_buf->page = NULL;
+			return;
+		}
+	}
+
+	index = rx_queue->added_count & EFX_RXQ_MASK;
+	new_buf = efx_rx_buffer(rx_queue, index);
+
+	memcpy(new_buf, rx_buf, sizeof(*new_buf));
+	rx_buf->page = NULL;
+	rx_buf->skb = NULL;
+	++rx_queue->added_count;
+}
+
 /**
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue: RX descriptor queue
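The fall-through at the end of efx_recycle_rx_buffer() hands the descriptor back to the ring by copying it and then clearing the source's page and skb pointers, so the completion path cannot free resources the ring now owns. A toy userspace model of that hand-off (struct fields abbreviated and names invented for illustration):

#include <assert.h>
#include <string.h>

struct buf {
	void *page;	/* stands in for struct page * */
	void *skb;	/* stands in for struct sk_buff * */
	unsigned long dma_addr;
};

static void recycle(struct buf *ring_slot, struct buf *rx_buf)
{
	memcpy(ring_slot, rx_buf, sizeof(*ring_slot));
	rx_buf->page = NULL;	/* completion path must not free these now */
	rx_buf->skb = NULL;
}

int main(void)
{
	int page_storage;
	struct buf slot = { 0 }, popped = { &page_storage, NULL, 0x1000 };

	recycle(&slot, &popped);
	assert(slot.page == &page_storage);	/* ring owns the page again */
	assert(popped.page == NULL && popped.skb == NULL);
	return 0;
}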
@@ -271,7 +336,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
 	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	if (fill_level >= rx_queue->fast_fill_trigger)
-		return;
+		goto out;
 
 	/* Record minimum fill level */
 	if (unlikely(fill_level < rx_queue->min_fill)) {
@@ -281,7 +346,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
-		return;
+		goto out;
 
 	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
 		  " level %d to level %d using %s allocation\n",
@@ -306,8 +371,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 		  rx_queue->added_count - rx_queue->removed_count);
 
  out:
-	/* Send write pointer to card. */
-	efx_nic_notify_rx_desc(rx_queue);
+	if (rx_queue->notified_count != rx_queue->added_count)
+		efx_nic_notify_rx_desc(rx_queue);
 }
 
 void efx_rx_slow_fill(unsigned long context)
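The reworked epilogue only writes the doorbell when descriptors were actually added, so the early "goto out" paths become cheap no-ops. A standalone sketch of that guard (the struct and maybe_notify() are invented for illustration; in the driver, efx_nic_notify_rx_desc() is what advances notified_count):

#include <stdio.h>

struct rxq {
	unsigned added_count;
	unsigned notified_count;
};

static void maybe_notify(struct rxq *q)
{
	if (q->notified_count != q->added_count) {
		printf("doorbell: %u new descriptors\n",
		       q->added_count - q->notified_count);
		q->notified_count = q->added_count;	/* as the NIC code does */
	}
	/* else: nothing added since the last write - skip the MMIO */
}

int main(void)
{
	struct rxq q = { .added_count = 10, .notified_count = 10 };

	maybe_notify(&q);	/* early bail-out case: silent no-op */
	q.added_count += 2;	/* recycle path inserted two buffers */
	maybe_notify(&q);	/* one doorbell write covers both */
	return 0;
}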
@@ -418,6 +483,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		   unsigned int len, bool checksummed, bool discard)
 {
 	struct efx_nic *efx = rx_queue->efx;
+	struct efx_channel *channel = rx_queue->channel;
 	struct efx_rx_buffer *rx_buf;
 	bool leak_packet = false;
@@ -445,12 +511,13 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	/* Discard packet, if instructed to do so */
 	if (unlikely(discard)) {
 		if (unlikely(leak_packet))
-			rx_queue->channel->n_skbuff_leaks++;
+			channel->n_skbuff_leaks++;
 		else
-			/* We haven't called efx_unmap_rx_buffer yet,
-			 * so fini the entire rx_buffer here */
-			efx_fini_rx_buffer(rx_queue, rx_buf);
-		return;
+			efx_recycle_rx_buffer(channel, rx_buf);
+
+		/* Don't hold off the previous receive */
+		rx_buf = NULL;
+		goto out;
 	}
 
 	/* Release card resources - assumes all RX buffers consumed in-order
@@ -467,6 +534,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	 * prefetched into cache.
 	 */
 	rx_buf->len = len;
+out:
 	if (rx_queue->channel->rx_pkt)
 		__efx_rx_packet(rx_queue->channel,
 				rx_queue->channel->rx_pkt,
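The new out: label preserves the one-packet pipeline: each call to efx_rx_packet() first flushes the packet parked by the previous call, and a discarded (recycled) frame parks NULL instead of a buffer, so it never holds off the previous receive. A toy model of that flow (userspace, invented names, not driver code):

#include <stdio.h>

struct channel {
	const char *rx_pkt;	/* packet parked by the previous call */
};

static void rx_packet(struct channel *ch, const char *pkt, int discard)
{
	const char *cur = discard ? NULL : pkt;	/* recycled frames park NULL */

	if (ch->rx_pkt)
		printf("deliver %s\n", ch->rx_pkt);	/* flush previous rx */
	ch->rx_pkt = cur;	/* park the current packet for the next call */
}

int main(void)
{
	struct channel ch = { 0 };

	rx_packet(&ch, "pkt A", 0);
	rx_packet(&ch, "pkt B", 1);	/* discarded, but still flushes pkt A */
	rx_packet(&ch, "pkt C", 0);	/* nothing parked; parks pkt C */
	return 0;
}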