Commit 5b370532 authored by Yunsheng Lin, committed by Xie XiuQi

net: hns3: Add support for using order 1 pages with a 4K buffer

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Hardware supports rx buffer sizes of 512, 1024, 2048 and 4096
bytes, but the rx buffer cannot be reused when the page size and
the rx buffer size are both 4096, because hns3_page_order returns
0 in that case.

So this patch changes hns3_page_order to return 1 when the rx
buffer size is greater than half of the page size and the page
size is less than 8192; dev_alloc_pages is already used to
allocate the compound page for the rx buffer.

This patch also renames hnae3_* to hns3_* for the page order and
rx buffer size calculations, because they are used in the hns3
module.

Feature or Bugfix: Bugfix
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Reviewed-by: lipeng <lipeng321@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 8ad1bf95
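To make the arithmetic in the commit message concrete, here is a minimal user-space C sketch (illustration only, not part of the patch; get_order() is re-implemented just for the demo) contrasting the old get_order()-based page order with the new helper when PAGE_SIZE is 4096:

/*
 * User-space sketch, not driver code: with PAGE_SIZE == 4096 and a 4096
 * byte rx buffer, the old calculation yields an order-0 page that can
 * hold only one buffer, while the new one yields an order-1 page.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Smallest order such that (PAGE_SIZE << order) >= size, as get_order() does. */
static unsigned int get_order(unsigned int size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

/* Old: #define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring))) */
static unsigned int old_page_order(unsigned int buf_size)
{
	return get_order(buf_size);
}

/* New: order 1 when the buffer takes more than half of a sub-8K page. */
static unsigned int new_page_order(unsigned int buf_size)
{
#if (PAGE_SIZE < 8192)
	if (buf_size > PAGE_SIZE / 2)
		return 1;
#endif
	return 0;
}

int main(void)
{
	unsigned int buf_size = 4096;
	unsigned int old_size = PAGE_SIZE << old_page_order(buf_size);
	unsigned int new_size = PAGE_SIZE << new_page_order(buf_size);

	/* Reuse needs room for a second buffer in the same page. */
	printf("old: page size %u, reusable: %s\n",
	       old_size, 2 * buf_size <= old_size ? "yes" : "no");
	printf("new: page size %u, reusable: %s\n",
	       new_size, 2 * buf_size <= new_size ? "yes" : "no");
	return 0;
}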
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2151,7 +2151,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 			     struct hns3_desc_cb *cb)
 {
-	unsigned int order = hnae3_page_order(ring);
+	unsigned int order = hns3_page_order(ring);
 	struct page *p;
 
 	p = dev_alloc_pages(order);
@@ -2162,7 +2162,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 	cb->page_offset = 0;
 	cb->reuse_flag = 0;
 	cb->buf = page_address(p);
-	cb->length = hnae3_page_size(ring);
+	cb->length = hns3_page_size(ring);
 	cb->type = DESC_TYPE_PAGE;
 
 	return 0;
@@ -2465,7 +2465,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 {
 	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
 	int size = le16_to_cpu(desc->rx.size);
-	u32 truesize = hnae3_buf_size(ring);
+	u32 truesize = hns3_buf_size(ring);
 
 	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 			size - pull_len, truesize);
@@ -2480,7 +2480,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	/* Move offset up to the next cache line */
 	desc_cb->page_offset += truesize;
 
-	if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+	if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
 		desc_cb->reuse_flag = 1;
 		/* Bump ref count on page before it is given */
 		get_page(desc_cb->priv);
@@ -2768,7 +2768,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
 	}
 
 	if (ring->tail_skb) {
-		head_skb->truesize += hnae3_buf_size(ring);
+		head_skb->truesize += hns3_buf_size(ring);
 		head_skb->data_len += le16_to_cpu(desc->rx.size);
 		head_skb->len += le16_to_cpu(desc->rx.size);
 		skb = ring->tail_skb;
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -627,9 +627,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 #define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
 
-#define hnae3_buf_size(_ring) ((_ring)->buf_size)
-#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring->buf_size > (PAGE_SIZE / 2))
+		return 1;
+#endif
+	return 0;
+}
+
+#define hns3_page_size(_ring)	(PAGE_SIZE << hns3_page_order(_ring))
 
 /* iterator for handling rings in ring group */
 #define hns3_for_each_ring(pos, head) \
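As a follow-up, the small simulation below (assumed values, not driver source) walks the desc_cb->page_offset reuse check from hns3_nic_reuse_page() with the new order-1 page: two 4K buffers fit in the 8K page, so the page is reused once before it is released.

/*
 * User-space simulation of the reuse check in hns3_nic_reuse_page()
 * with order-1 pages on a 4K-page system. Values are assumed for
 * illustration: page size 8192 (PAGE_SIZE << 1), truesize 4096.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_size = 8192;	/* hns3_page_size(ring) */
	unsigned int truesize = 4096;	/* hns3_buf_size(ring) */
	unsigned int page_offset = 0;
	int buf;

	for (buf = 1; buf <= 2; buf++) {
		page_offset += truesize;
		int reuse = page_offset + truesize <= page_size;

		printf("buffer %d consumed: offset %u, reuse_flag = %d\n",
		       buf, page_offset, reuse);
	}
	/* Prints reuse_flag = 1 after the first buffer (second half of
	 * the page is still free) and 0 after the second (page exhausted).
	 */
	return 0;
}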