Commit 12bb6fc8 authored by Hao Chen, committed by Zheng Zengkai

net: hns3: refactor hns3_nic_reuse_page()

mainline inclusion
from mainline-master
commit e74a726d
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4M1HB
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e74a726da2c4

----------------------------------------------------------------------

Split the rx copybreak handling out of hns3_nic_reuse_page() into a
separate function to improve code simplicity.
Signed-off-by: Hao Chen <chenhao288@hisilicon.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Yongxin Li <liyongxin1@huawei.com>
Signed-off-by: Junxin Chen <chenjunxin1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 75752b88
@@ -3545,6 +3545,38 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
 	return page_count(cb->priv) == cb->pagecnt_bias;
 }
 
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+				    struct hns3_enet_ring *ring,
+				    int pull_len,
+				    struct hns3_desc_cb *desc_cb)
+{
+	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+	u32 frag_offset = desc_cb->page_offset + pull_len;
+	int size = le16_to_cpu(desc->rx.size);
+	u32 frag_size = size - pull_len;
+	void *frag = napi_alloc_frag(frag_size);
+
+	if (unlikely(!frag)) {
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.frag_alloc_err++;
+		u64_stats_update_end(&ring->syncp);
+		hns3_rl_err(ring_to_netdev(ring),
+			    "failed to allocate rx frag\n");
+		return -ENOMEM;
+	}
+
+	desc_cb->reuse_flag = 1;
+	memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+	skb_add_rx_frag(skb, i, virt_to_page(frag),
+			offset_in_page(frag), frag_size, frag_size);
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.frag_alloc++;
+	u64_stats_update_end(&ring->syncp);
+
+	return 0;
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
@@ -3554,6 +3586,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	int size = le16_to_cpu(desc->rx.size);
 	u32 truesize = hns3_buf_size(ring);
 	u32 frag_size = size - pull_len;
+	int ret = 0;
 	bool reused;
 
 	if (ring->page_pool) {
@@ -3588,29 +3621,11 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 		desc_cb->page_offset = 0;
 		desc_cb->reuse_flag = 1;
 	} else if (frag_size <= ring->rx_copybreak) {
-		void *frag = napi_alloc_frag(frag_size);
-
-		if (unlikely(!frag)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.frag_alloc_err++;
-			u64_stats_update_end(&ring->syncp);
-
-			hns3_rl_err(ring_to_netdev(ring),
-				    "failed to allocate rx frag\n");
+		ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+		if (ret)
 			goto out;
-		}
-
-		desc_cb->reuse_flag = 1;
-		memcpy(frag, desc_cb->buf + frag_offset, frag_size);
-		skb_add_rx_frag(skb, i, virt_to_page(frag),
-				offset_in_page(frag), frag_size, frag_size);
-
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.frag_alloc++;
-		u64_stats_update_end(&ring->syncp);
-
 		return;
 	}
 
 out:
 	desc_cb->pagecnt_bias--;