Commit b6e40382 authored by David S. Miller

Merge branch 'thunderx-perf'

Sunil Goutham says:

====================
net: thunderx: Performance enhancement changes

The patches below attempt to improve performance by reducing the
number of atomic operations performed while allocating new receive
buffers, and by reducing cache misses through rearranging the nicvf
structure members.

Changes from v1:
 No changes, resubmitting afresh as per David's suggestion.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -272,45 +272,54 @@ struct nicvf {
 	struct nicvf *pnicvf;
 	struct net_device *netdev;
 	struct pci_dev *pdev;
+	void __iomem *reg_base;
+	struct queue_set *qs;
+	struct nicvf_cq_poll *napi[8];
 	u8 vf_id;
-	u8 node;
-	u8 tns_mode:1;
-	u8 sqs_mode:1;
-	u8 loopback_supported:1;
+	u8 sqs_id;
+	bool sqs_mode;
 	bool hw_tso;
-	u16 mtu;
-	struct queue_set *qs;
+
+	/* Receive buffer alloc */
+	u32 rb_page_offset;
+	u16 rb_pageref;
+	bool rb_alloc_fail;
+	bool rb_work_scheduled;
+	struct page *rb_page;
+	struct delayed_work rbdr_work;
+	struct tasklet_struct rbdr_task;
+
+	/* Secondary Qset */
+	u8 sqs_count;
 #define MAX_SQS_PER_VF_SINGLE_NODE 5
 #define MAX_SQS_PER_VF 11
-	u8 sqs_id;
-	u8 sqs_count; /* Secondary Qset count */
 	struct nicvf *snicvf[MAX_SQS_PER_VF];
+
+	/* Queue count */
 	u8 rx_queues;
 	u8 tx_queues;
 	u8 max_queues;
 
-	void __iomem *reg_base;
+	u8 node;
+	u8 cpi_alg;
+	u16 mtu;
 	bool link_up;
 	u8 duplex;
 	u32 speed;
-	struct page *rb_page;
-	u32 rb_page_offset;
-	bool rb_alloc_fail;
-	bool rb_work_scheduled;
-	struct delayed_work rbdr_work;
-	struct tasklet_struct rbdr_task;
-	struct tasklet_struct qs_err_task;
-	struct tasklet_struct cq_task;
-	struct nicvf_cq_poll *napi[8];
+	bool tns_mode;
+	bool loopback_supported;
 	struct nicvf_rss_info rss_info;
-	u8 cpi_alg;
+	struct tasklet_struct qs_err_task;
+	struct work_struct reset_task;
+
 	/* Interrupt coalescing settings */
 	u32 cq_coalesce_usecs;
 	u32 msg_enable;
+
+	/* Stats */
 	struct nicvf_hw_stats hw_stats;
 	struct nicvf_drv_stats drv_stats;
 	struct bgx_stats bgx_stats;
-	struct work_struct reset_task;
 
 	/* MSI-X */
 	bool msix_enabled;
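The reordering above pulls the members dereferenced on every packet (reg_base, qs, the napi pointers and the receive-buffer bookkeeping fields) next to each other and pushes configuration and statistics members toward the end of struct nicvf, so the hot path touches fewer cache lines. As a rough, self-contained illustration of what that means, the user-space sketch below uses a simplified, hypothetical stand-in struct (not the real nicvf definition) and prints which 64-byte cache line each member starts on; a tool such as pahole reports the same kind of layout information for the actual kernel structure:

/* Illustrative only: report which 64-byte cache line each member of a
 * simplified, hypothetical hot-path structure starts on.
 */
#include <stdio.h>
#include <stddef.h>

struct hot_path_demo {
	void *reg_base;        /* doorbell writes on every xmit/refill */
	void *qs;              /* queue set, dereferenced per packet */
	void *napi[8];         /* per-CQ NAPI contexts */
	unsigned char vf_id;
	_Bool hw_tso;
	/* colder configuration/statistics members would follow here */
	unsigned int mtu;
	unsigned int speed;
};

#define LINE_OF(member) (offsetof(struct hot_path_demo, member) / 64)

int main(void)
{
	printf("reg_base starts on cache line %zu\n", LINE_OF(reg_base));
	printf("qs       starts on cache line %zu\n", LINE_OF(qs));
	printf("napi[]   starts on cache line %zu\n", LINE_OF(napi));
	printf("mtu      starts on cache line %zu\n", LINE_OF(mtu));
	return 0;
}

On a typical 64-bit build, reg_base and qs land on line 0 and the napi[] array spans lines 0 and 1; clustering the frequently used members like this at the top of struct nicvf is what the patch relies on to cut cache misses.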
@@ -18,6 +18,15 @@
 #include "q_struct.h"
 #include "nicvf_queues.h"
 
+static void nicvf_get_page(struct nicvf *nic)
+{
+	if (!nic->rb_pageref || !nic->rb_page)
+		return;
+
+	atomic_add(nic->rb_pageref, &nic->rb_page->_count);
+	nic->rb_pageref = 0;
+}
+
 /* Poll a register for a specific value */
 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
 			  u64 reg, int bit_pos, int bits, int val)
@@ -81,16 +90,15 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
 
 	/* Check if request can be accomodated in previous allocated page */
-	if (nic->rb_page) {
-		if ((nic->rb_page_offset + buf_len + buf_len) >
-		    (PAGE_SIZE << order)) {
-			nic->rb_page = NULL;
-		} else {
-			nic->rb_page_offset += buf_len;
-			get_page(nic->rb_page);
-		}
+	if (nic->rb_page &&
+	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
+		nic->rb_pageref++;
+		goto ret;
 	}
 
+	nicvf_get_page(nic);
+	nic->rb_page = NULL;
+
 	/* Allocate a new page */
 	if (!nic->rb_page) {
 		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
@@ -102,7 +110,9 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 		nic->rb_page_offset = 0;
 	}
 
+ret:
 	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+	nic->rb_page_offset += buf_len;
 
 	return 0;
 }
@@ -158,6 +168,9 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
 		desc = GET_RBDR_DESC(rbdr, idx);
 		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
 	}
+
+	nicvf_get_page(nic);
+
 	return 0;
 }
@@ -241,6 +254,8 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
 		new_rb++;
 	}
 
+	nicvf_get_page(nic);
+
 	/* make sure all memory stores are done before ringing doorbell */
 	smp_wmb();
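The allocator change above stops calling get_page() (an atomic reference-count increment) for every buffer carved out of the current page; instead nic->rb_pageref counts the buffers locally and nicvf_get_page() publishes them with a single atomic_add() once a batch of descriptors has been set up, as the calls added in nicvf_init_rbdr() and nicvf_refill_rbdr() show. Below is a stand-alone sketch of that batching pattern, using hypothetical names and a plain C11 atomic standing in for the page reference count; it is an illustration of the idea, not the driver code itself:

/* Illustrative sketch: carve several receive buffers out of one page,
 * count the references locally, and publish them with one atomic add
 * at the end of the batch instead of one atomic increment per buffer.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_SZ 4096
#define BUF_LEN 1536

static atomic_int page_refcount = 1;   /* stands in for page->_count */
static int pageref_batch;              /* stands in for nic->rb_pageref */

static void flush_page_refs(void)      /* plays the role of nicvf_get_page() */
{
	if (!pageref_batch)
		return;
	/* one atomic operation covers the whole batch */
	atomic_fetch_add(&page_refcount, pageref_batch);
	pageref_batch = 0;
}

int main(void)
{
	/* Fill a ring: each buffer taken from the page only bumps the
	 * local counter, mirroring nic->rb_pageref++ in the fast path.
	 */
	for (int off = 0; off + BUF_LEN <= PAGE_SZ; off += BUF_LEN)
		pageref_batch++;

	flush_page_refs();    /* mirrors the call after the refill loop */
	printf("page refcount is now %d\n", atomic_load(&page_refcount));
	return 0;
}

For a full ring refill this works out to roughly one atomic reference-count update per page actually used, rather than one per receive buffer.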