Commit 66f9513a authored by Rasesh Mody, committed by David S. Miller

bna: RX Processing and Config Changes

Change Details:
 - Prefetch the header in the GRO path. This reduces time spent in
   napi_frags_skb() from 9% to 5%.
 - Raised the configurable limit of RxQ depth to 16384 (was 2048).
 - bnad_rx_unmap_q elements are cache-line aligned.
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent e29aa339
@@ -536,6 +536,11 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
 	unmap_q = rcb->unmap_q;
 	bnad = rcb->bnad;
+
+	/* prefetch header */
+	prefetch(page_address(unmap_q->unmap[sop_ci].page) +
+			unmap_q->unmap[sop_ci].page_offset);
+
 	for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
 		unmap = &unmap_q->unmap[ci];
 		BNA_QE_INDX_INC(ci, rcb->q_depth);
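Note: the hunk above issues the prefetch before the per-vector loop, so the
header's cache miss overlaps with the loop's bookkeeping. Below is a minimal
userspace sketch of the same pattern, with GCC's __builtin_prefetch() standing
in for the kernel's prefetch(); every name in it is hypothetical, not bna code.

/* Hypothetical illustration of the prefetch-before-use pattern;
 * not driver code. */
#include <stdint.h>
#include <stdio.h>

struct rx_slot {
	char     *page;        /* backing buffer */
	uint32_t  page_offset; /* offset of the received header */
};

static int sum_headers(const struct rx_slot *q, int sop, int nvecs)
{
	int acc = 0;

	/* Start pulling the header's cache line in early ... */
	__builtin_prefetch(q[sop].page + q[sop].page_offset);

	/* ... so it is (likely) resident by the time the loop reads it. */
	for (int vec = 0, ci = sop; vec < nvecs; vec++, ci++)
		acc += q[ci].page[q[ci].page_offset];
	return acc;
}

int main(void)
{
	static char buf[4096];
	struct rx_slot q[4] = {
		{ buf, 0 }, { buf, 64 }, { buf, 128 }, { buf, 192 }
	};

	printf("%d\n", sum_headers(q, 0, 4));
	return 0;
}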
@@ -84,7 +84,7 @@ struct bnad_rx_ctrl {
 #define BNAD_IOCETH_TIMEOUT	10000
 #define BNAD_MIN_Q_DEPTH	512
-#define BNAD_MAX_RXQ_DEPTH	2048
+#define BNAD_MAX_RXQ_DEPTH	16384
 #define BNAD_MAX_TXQ_DEPTH	2048
 #define BNAD_JUMBO_MTU		9000
@@ -237,9 +237,9 @@ struct bnad_rx_vector {
 struct bnad_rx_unmap {
 	struct page		*page;
-	u32			page_offset;
 	struct sk_buff		*skb;
 	struct bnad_rx_vector	vector;
+	u32			page_offset;
 };
 enum bnad_rxbuf_type {
@@ -257,7 +257,7 @@ struct bnad_rx_unmap_q {
 	int			alloc_order;
 	u32			map_size;
 	enum bnad_rxbuf_type	type;
-	struct bnad_rx_unmap	unmap[0];
+	struct bnad_rx_unmap	unmap[0] ____cacheline_aligned;
 };
 #define BNAD_PCI_DEV_IS_CAT2(_bnad)	\
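Note: the last two hunks move page_offset to the end of bnad_rx_unmap and
start the unmap[] flexible array on a cache-line boundary, presumably so the
hot per-element data does not share a line with the queue header fields. A
hypothetical userspace sketch of that layout idiom follows, using
__attribute__((aligned)) and aligned_alloc() in place of the kernel's
____cacheline_aligned and its allocator; the 64-byte line size is an
assumption.

/* Hypothetical userspace sketch of a cache-aligned flexible array;
 * not driver code. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHELINE 64	/* assumed cache-line size */

struct rx_unmap {
	void         *page;
	void         *skb;
	unsigned int  page_offset;
};

struct rx_unmap_q {
	int          alloc_order;
	unsigned int map_size;
	/* the aligned attribute pushes unmap[] to the next cache-line
	 * boundary, mirroring ____cacheline_aligned in the hunk above */
	struct rx_unmap unmap[] __attribute__((aligned(CACHELINE)));
};

int main(void)
{
	size_t depth = 1024;
	size_t sz = sizeof(struct rx_unmap_q) +
		    depth * sizeof(struct rx_unmap);
	struct rx_unmap_q *q;

	/* round the size up so aligned_alloc()'s multiple-of-alignment
	 * requirement is met */
	sz = (sz + CACHELINE - 1) & ~(size_t)(CACHELINE - 1);
	q = aligned_alloc(CACHELINE, sz);
	if (!q)
		return 1;

	printf("unmap[] starts at offset %zu\n",
	       offsetof(struct rx_unmap_q, unmap));
	free(q);
	return 0;
}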