Commit 156a66de authored by Yunsheng Lin, committed by Zheng Zengkai

page_pool: keep pp info as long as page pool owns the page

mainline inclusion
from mainline-v5.15-rc1
commit 57f05bc2
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4CVS3
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=57f05bc2ab24

----------------------------------------------------------------------

Currently, page->pp is cleared and set every time the page is
recycled, which is unnecessary.

So only set page->pp when the page is added to the page pool,
and only clear it when the page is released from the page
pool.

This is also a preparation for supporting frag page allocation
in the page pool.
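
A minimal sketch of the driver-visible difference (the call site
below is illustrative only; the helper is the one changed in
include/linux/skbuff.h by this patch):

	/* before: every RX path passed pp info along per skb */
	skb_mark_for_recycle(skb, page, pool);	/* also set page->pp */

	/* after: page->pp is owned by the page pool for as long as
	 * the page stays in the pool, so drivers only flag the skb
	 */
	skb_mark_for_recycle(skb);	/* only sets skb->pp_recycle = 1 */
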
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Yongxin Li <liyongxin1@huawei.com>
Signed-off-by: Junxin Chen <chenjunxin1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 64e670dd
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2314,7 +2314,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
+	skb_mark_for_recycle(skb);
 
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	skb_put(skb, xdp->data_end - xdp->data);
@@ -2326,10 +2326,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(frag), skb_frag_off(frag),
 				skb_frag_size(frag), PAGE_SIZE);
-		/* We don't need to reset pp_recycle here. It's already set, so
-		 * just mark fragments for recycling.
-		 */
-		page_pool_store_mem_info(skb_frag_page(frag), pool);
 	}
 
 	return skb;
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3643,7 +3643,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
 		}
 
 		if (pp)
-			skb_mark_for_recycle(skb, page, pp);
+			skb_mark_for_recycle(skb);
 		else
 			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
 					       bm_pool->buf_size, DMA_FROM_DEVICE,
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -439,7 +439,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	skb->protocol = eth_type_trans(skb, ndev);
 
 	/* mark skb for recycling */
-	skb_mark_for_recycle(skb, page, pool);
+	skb_mark_for_recycle(skb);
 	netif_receive_skb(skb);
 
 	ndev->stats.rx_bytes += len;
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -382,7 +382,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	skb->protocol = eth_type_trans(skb, ndev);
 
 	/* mark skb for recycling */
-	skb_mark_for_recycle(skb, page, pool);
+	skb_mark_for_recycle(skb);
 	netif_receive_skb(skb);
 
 	ndev->stats.rx_bytes += len;
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4673,11 +4673,9 @@ static inline bool skb_csum_is_sctp(struct sk_buff *skb)
 }
 
 #ifdef CONFIG_PAGE_POOL
-static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
-					struct page_pool *pp)
+static inline void skb_mark_for_recycle(struct sk_buff *skb)
 {
 	skb->pp_recycle = 1;
-	page_pool_store_mem_info(page, pp);
 }
 #endif
 
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -253,11 +253,4 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
 	spin_unlock_bh(&pool->ring.producer_lock);
 }
 
-/* Store mem_info on struct page and use it while recycling skb frags */
-static inline
-void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
-{
-	page->pp = pp;
-}
-
 #endif /* _NET_PAGE_POOL_H */
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -206,6 +206,19 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	return true;
 }
 
+static void page_pool_set_pp_info(struct page_pool *pool,
+				  struct page *page)
+{
+	page->pp = pool;
+	page->pp_magic |= PP_SIGNATURE;
+}
+
+static void page_pool_clear_pp_info(struct page *page)
+{
+	page->pp_magic = 0;
+	page->pp = NULL;
+}
+
 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 						 gfp_t gfp)
 {
@@ -222,7 +235,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 		return NULL;
 	}
 
-	page->pp_magic |= PP_SIGNATURE;
+	page_pool_set_pp_info(pool, page);
 
 	/* Track how many pages are held 'in-flight' */
 	pool->pages_state_hold_cnt++;
@@ -266,7 +279,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 			put_page(page);
 			continue;
 		}
-		page->pp_magic |= PP_SIGNATURE;
+
+		page_pool_set_pp_info(pool, page);
 		pool->alloc.cache[pool->alloc.count++] = page;
 		/* Track how many pages are held 'in-flight' */
 		pool->pages_state_hold_cnt++;
@@ -345,7 +359,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 			     DMA_ATTR_SKIP_CPU_SYNC);
 	page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
-	page->pp_magic = 0;
+	page_pool_clear_pp_info(page);
 
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.
@@ -652,7 +666,6 @@ bool page_pool_return_skb_page(struct page *page)
 	 * The page will be returned to the pool here regardless of the
 	 * 'flipped' fragment being in use or not.
 	 */
-	page->pp = NULL;
 	page_pool_put_full_page(pp, page, false);
 
 	return true;
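
For reference, a simplified sketch of the resulting page lifecycle
(helper names are taken from the patched net/core/page_pool.c;
callers are abbreviated):

	/*
	 * alloc:   __page_pool_alloc_page_order() or
	 *          __page_pool_alloc_pages_slow()
	 *              -> page_pool_set_pp_info(pool, page)
	 *                 page->pp and PP_SIGNATURE are set once
	 *
	 * recycle: the page may cycle through the pool any number
	 *          of times; pp info is left untouched
	 *
	 * release: page_pool_release_page()
	 *              -> page_pool_clear_pp_info(page)
	 *                 page->pp and page->pp_magic are cleared once
	 */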