Commit a79afa78 authored by Alexander Lobakin, committed by Jakub Kicinski

net: use the new dev_page_is_reusable() instead of private versions

Now we can remove a bunch of identical functions from the drivers and
make them use the common dev_page_is_reusable(). All {,un}likely()
checks at the call sites are omitted, since the hint is already present
in this helper.
Also update some comments near the call sites.
Suggested-by: David Rientjes <rientjes@google.com>
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Alexander Lobakin <alobakin@pm.me>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Parent: bc38f30f
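
For reference, dev_page_is_reusable() is the helper introduced by the
parent commit (bc38f30f) in include/linux/skbuff.h; its definition is
roughly the sketch below, which is why the per-driver likely()/unlikely()
annotations can be dropped at the call sites:

static inline bool dev_page_is_reusable(const struct page *page)
{
	/* reusable iff the page is node-local and not pfmemalloc-backed */
	return likely(page_to_nid(page) == numa_mem_id() &&
		      !page_is_pfmemalloc(page));
}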
@@ -2800,12 +2800,6 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
 }
 
-static bool hns3_page_is_reusable(struct page *page)
-{
-	return page_to_nid(page) == numa_mem_id() &&
-		!page_is_pfmemalloc(page);
-}
-
 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
 {
 	return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
@@ -2823,10 +2817,11 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);
 
-	/* Avoid re-using remote pages, or the stack is still using the page
-	 * when page_offset rollback to zero, flag default unreuse
+	/* Avoid re-using remote and pfmemalloc pages, or the stack is still
+	 * using the page when page_offset rollback to zero, flag default
+	 * unreuse
 	 */
-	if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
+	if (!dev_page_is_reusable(desc_cb->priv) ||
 	    (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
 		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
 		return;
@@ -3083,8 +3078,8 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 	if (length <= HNS3_RX_HEAD_SIZE) {
 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
 
-		/* We can reuse buffer as-is, just make sure it is local */
-		if (likely(hns3_page_is_reusable(desc_cb->priv)))
+		/* We can reuse buffer as-is, just make sure it is reusable */
+		if (dev_page_is_reusable(desc_cb->priv))
 			desc_cb->reuse_flag = 1;
 		else /* This page cannot be reused so discard it */
 			__page_frag_cache_drain(desc_cb->priv,
...
@@ -194,17 +194,12 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
 			      DMA_FROM_DEVICE);
 }
 
-static inline bool fm10k_page_is_reserved(struct page *page)
-{
-	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 				    struct page *page,
 				    unsigned int __maybe_unused truesize)
 {
-	/* avoid re-using remote pages */
-	if (unlikely(fm10k_page_is_reserved(page)))
+	/* avoid re-using remote and pfmemalloc pages */
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
@@ -265,8 +260,8 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
 	if (likely(size <= FM10K_RX_HDR_LEN)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-		/* page is not reserved, we can reuse buffer as-is */
-		if (likely(!fm10k_page_is_reserved(page)))
+		/* page is reusable, we can reuse buffer as-is */
+		if (dev_page_is_reusable(page))
 			return true;
 
 		/* this page cannot be reused so discard it */
...
@@ -1843,19 +1843,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 	return false;
 }
 
-/**
- * i40e_page_is_reusable - check if any reuse is possible
- * @page: page struct to check
- *
- * A page is not reusable if it was allocated under low memory
- * conditions, or it's not in the same NUMA node as this CPU.
- */
-static inline bool i40e_page_is_reusable(struct page *page)
-{
-	return (page_to_nid(page) == numa_mem_id()) &&
-		!page_is_pfmemalloc(page);
-}
-
 /**
  * i40e_can_reuse_rx_page - Determine if this page can be reused by
  * the adapter for another receive
@@ -1891,7 +1878,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 	struct page *page = rx_buffer->page;
 
 	/* Is any reuse possible? */
-	if (unlikely(!i40e_page_is_reusable(page)))
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
...
@@ -1141,19 +1141,6 @@ static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 }
 
-/**
- * iavf_page_is_reusable - check if any reuse is possible
- * @page: page struct to check
- *
- * A page is not reusable if it was allocated under low memory
- * conditions, or it's not in the same NUMA node as this CPU.
- */
-static inline bool iavf_page_is_reusable(struct page *page)
-{
-	return (page_to_nid(page) == numa_mem_id()) &&
-		!page_is_pfmemalloc(page);
-}
-
 /**
  * iavf_can_reuse_rx_page - Determine if this page can be reused by
  * the adapter for another receive
@@ -1187,7 +1174,7 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
 	struct page *page = rx_buffer->page;
 
 	/* Is any reuse possible? */
-	if (unlikely(!iavf_page_is_reusable(page)))
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
...
@@ -728,15 +728,6 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
 	return !!cleaned_count;
 }
 
-/**
- * ice_page_is_reserved - check if reuse is possible
- * @page: page struct to check
- */
-static bool ice_page_is_reserved(struct page *page)
-{
-	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
 /**
  * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
  * @rx_buf: Rx buffer to adjust
@@ -775,8 +766,8 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
 	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 	struct page *page = rx_buf->page;
 
-	/* avoid re-using remote pages */
-	if (unlikely(ice_page_is_reserved(page)))
+	/* avoid re-using remote and pfmemalloc pages */
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
...
@@ -8215,18 +8215,13 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 }
 
-static inline bool igb_page_is_reserved(struct page *page)
-{
-	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
 
-	/* avoid re-using remote pages */
-	if (unlikely(igb_page_is_reserved(page)))
+	/* avoid re-using remote and pfmemalloc pages */
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
...
@@ -1648,18 +1648,13 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 }
 
-static inline bool igc_page_is_reserved(struct page *page)
-{
-	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
 
-	/* avoid re-using remote pages */
-	if (unlikely(igc_page_is_reserved(page)))
+	/* avoid re-using remote and pfmemalloc pages */
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
...
@@ -1940,19 +1940,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 }
 
-static inline bool ixgbe_page_is_reserved(struct page *page)
-{
-	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
 static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
 				    int rx_buffer_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
 
-	/* avoid re-using remote pages */
-	if (unlikely(ixgbe_page_is_reserved(page)))
+	/* avoid re-using remote and pfmemalloc pages */
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
...
@@ -781,18 +781,13 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 }
 
-static inline bool ixgbevf_page_is_reserved(struct page *page)
-{
-	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
 static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
 
-	/* avoid re-using remote pages */
-	if (unlikely(ixgbevf_page_is_reserved(page)))
+	/* avoid re-using remote and pfmemalloc pages */
+	if (!dev_page_is_reusable(page))
 		return false;
 
 #if (PAGE_SIZE < 8192)
...
@@ -213,11 +213,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
 }
 
-static inline bool mlx5e_page_is_reserved(struct page *page)
-{
-	return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
-}
-
 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 				      struct mlx5e_dma_info *dma_info)
 {
@@ -230,7 +225,7 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 		return false;
 	}
 
-	if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
+	if (!dev_page_is_reusable(dma_info->page)) {
 		stats->cache_waive++;
 		return false;
 	}
...
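
Taken together, every driver's reuse check now has the same shape:
dev_page_is_reusable() answers the NUMA-locality and pfmemalloc
questions, and only the reference-count heuristic stays driver-private.
A minimal sketch of the resulting pattern (the foo_* names and struct
are illustrative, not taken from any one driver above):

static bool foo_can_reuse_rx_page(struct foo_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* remote-node and pfmemalloc checks now live in the common helper */
	if (!dev_page_is_reusable(page))
		return false;

	/* only reuse the page if nobody else is holding a reference */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;

	return true;
}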