Commit f5abfb4b authored by Yunsheng Lin, committed by Zheng Zengkai

net: hns3: optimize the rx page reuse handling process

mainline inclusion
from mainline-master
commit fa7711b8
category: feature
bugzilla: 173966
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=fa7711b888f24ee9291d90f8fbdaccfc80ed72c7

----------------------------------------------------------------------

Currently the rx page offset is only reset to zero when all of the
conditions below are satisfied:
1. the rx page is owned solely by the driver.
2. the rx page is reusable.
3. the page offset that is about to be given to the stack has
reached the end of the page.

If the page offset is past hns3_buf_size(), the buffer below that
offset in the page is usable whenever conditions 1 & 2 above are
satisfied, so the page offset can be reset to zero instead of being
advanced. We may then be able to always reuse the first 4K buffer of
a 64K page, which limits the hot buffer size as much as possible;
the sketch below illustrates the reworked decision.
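
As an illustration only, here is a minimal userspace C sketch of that
decision, mirroring the reworked branch in hns3_nic_reuse_page() from
the diff below. The names cb_model and stack_done and the fixed 64K/4K
sizes are hypothetical stand-ins, not the driver's actual types:

/* Toy model of the rx page reuse decision; not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE (64 * 1024)   /* 64K page, as in the example above */
#define MODEL_BUF_SIZE  (4 * 1024)    /* 4K rx buffer (truesize) */

/* Stand-in for the hns3_desc_cb fields the decision reads. */
struct cb_model {
	unsigned int page_offset;
	bool reuse_flag;
	bool stack_done;  /* models hns3_can_reuse_page(): stack holds no ref */
};

/* Roll the offset back to zero when the stack is done with the page;
 * otherwise advance it only while another buffer still fits behind
 * the current one.
 */
static void reuse_decision(struct cb_model *cb)
{
	if (cb->page_offset && cb->stack_done) {
		cb->page_offset = 0;
		cb->reuse_flag = true;
	} else if (cb->page_offset + MODEL_BUF_SIZE * 2 <= MODEL_PAGE_SIZE) {
		cb->page_offset += MODEL_BUF_SIZE;
		cb->reuse_flag = true;
	}
}

int main(void)
{
	struct cb_model cb = { .page_offset = MODEL_BUF_SIZE,
			       .stack_done = true };

	reuse_decision(&cb);
	/* Prints "offset=0 reuse=1": the first 4K of the 64K page is
	 * reused instead of marching on to offset 8K.
	 */
	printf("offset=%u reuse=%d\n", cb.page_offset, cb.reuse_flag);
	return 0;
}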

The above optimization is a side effect of refactoring the rx page
reuse handling in order to support rx copybreak.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Yongxin Li <liyongxin1@huawei.com>
Signed-off-by: Junxin Chen <chenjunxin1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 9bdc4f22
@@ -3525,7 +3525,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 
 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
 {
-	return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+	return page_count(cb->priv) == cb->pagecnt_bias;
 }
 
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
@@ -3533,40 +3533,40 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_desc_cb *desc_cb)
 {
 	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+	u32 frag_offset = desc_cb->page_offset + pull_len;
 	int size = le16_to_cpu(desc->rx.size);
 	u32 truesize = hns3_buf_size(ring);
+	u32 frag_size = size - pull_len;
 
-	desc_cb->pagecnt_bias--;
-	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
-			size - pull_len, truesize);
+	/* Avoid re-using remote or pfmem page */
+	if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
+		goto out;
 
-	/* Avoid re-using remote and pfmemalloc pages, or the stack is still
-	 * using the page when page_offset rollback to zero, flag default
-	 * unreuse
+	/* Stack is not using and current page_offset is non-zero, we can
+	 * reuse from the zero offset.
 	 */
-	if (!dev_page_is_reusable(desc_cb->priv) ||
-	    (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
-		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
-		return;
-	}
-
-	/* Move offset up to the next cache line */
-	desc_cb->page_offset += truesize;
-
-	if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
+	if (desc_cb->page_offset && hns3_can_reuse_page(desc_cb)) {
+		desc_cb->page_offset = 0;
 		desc_cb->reuse_flag = 1;
-	} else if (hns3_can_reuse_page(desc_cb)) {
+	} else if (desc_cb->page_offset + truesize * 2 <=
+		   hns3_page_size(ring)) {
+		desc_cb->page_offset += truesize;
 		desc_cb->reuse_flag = 1;
-		desc_cb->page_offset = 0;
-	} else if (desc_cb->pagecnt_bias) {
-		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
-		return;
 	}
 
+out:
+	desc_cb->pagecnt_bias--;
+
 	if (unlikely(!desc_cb->pagecnt_bias)) {
 		page_ref_add(desc_cb->priv, USHRT_MAX);
 		desc_cb->pagecnt_bias = USHRT_MAX;
 	}
+
+	skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
+			frag_size, truesize);
+
+	if (unlikely(!desc_cb->reuse_flag))
+		__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
 }
 
 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
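
A note on the hns3_can_reuse_page() change in the first hunk: the check
now runs before pagecnt_bias is decremented for the fragment being
handed out, so the old "stack holds exactly the one frag just given"
test, (page_count - pagecnt_bias) == 1, becomes "stack holds nothing
yet", page_count == pagecnt_bias. The toy model below uses plain
integers in place of struct page refcounts to show that bookkeeping;
bias_model and give_frag_to_stack are hypothetical names, not driver
code:

/* Toy model of the pagecnt_bias scheme visible in the hunk above. */
#include <assert.h>
#include <limits.h>

struct bias_model {
	unsigned int page_count;    /* models page_count(cb->priv) */
	unsigned int pagecnt_bias;  /* references still owned by the driver */
};

/* New form of hns3_can_reuse_page(): the page is reusable when every
 * reference is still driver-owned, i.e. the stack holds none.
 */
static int can_reuse(const struct bias_model *m)
{
	return m->page_count == m->pagecnt_bias;
}

/* Handing a fragment to the stack transfers one driver-owned reference;
 * the page refcount itself is untouched, which is the point of the bias
 * scheme. When the bias runs out it is refilled by one bulk
 * page_ref_add(), as in the hunk above.
 */
static void give_frag_to_stack(struct bias_model *m)
{
	m->pagecnt_bias--;
	if (!m->pagecnt_bias) {
		m->page_count += USHRT_MAX;  /* models page_ref_add() */
		m->pagecnt_bias = USHRT_MAX;
	}
}

int main(void)
{
	struct bias_model m = { .page_count = USHRT_MAX,
				.pagecnt_bias = USHRT_MAX };

	assert(can_reuse(&m));   /* nothing is out with the stack */
	give_frag_to_stack(&m);
	assert(!can_reuse(&m));  /* one fragment now belongs to the stack */
	m.page_count--;          /* models the stack freeing that skb frag */
	assert(can_reuse(&m));   /* the page can be recycled again */
	return 0;
}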