提交 4668e4b5 编写于 作者: C CQ Tang 提交者: Roland Dreier

IB/qib: Improve SDMA performance

1. The code accepts chunks of messages, and splits the chunk into
   packets when converting packets into sdma queue entries.  Adjacent
   packets will use user buffer pages smartly to avoid pinning the
   same page multiple times.

2. Instead of discarding all the work when the SDMA queue is full, the
   work is saved in a pending queue.  Whenever enough SDMA queue
   entries become free, the pending queue is moved directly onto the SDMA queue.

3. An interrupt handler is used to progress this pending queue.
Reviewed-by: NMike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: NCQ Tang <cq.tang@intel.com>
Signed-off-by: NMike Marciniszyn <mike.marciniszyn@intel.com>

[ Fixed up sparse warnings.  - Roland ]
Signed-off-by: NRoland Dreier <roland@purestorage.com>
上级 c095ba72
......@@ -576,11 +576,13 @@ struct qib_pportdata {
/* read/write using lock */
spinlock_t sdma_lock ____cacheline_aligned_in_smp;
struct list_head sdma_activelist;
struct list_head sdma_userpending;
u64 sdma_descq_added;
u64 sdma_descq_removed;
u16 sdma_descq_tail;
u16 sdma_descq_head;
u8 sdma_generation;
u8 sdma_intrequest;
struct tasklet_struct sdma_sw_clean_up_task
____cacheline_aligned_in_smp;
......@@ -1326,6 +1328,8 @@ int qib_setup_sdma(struct qib_pportdata *);
void qib_teardown_sdma(struct qib_pportdata *);
void __qib_sdma_intr(struct qib_pportdata *);
void qib_sdma_intr(struct qib_pportdata *);
void qib_user_sdma_send_desc(struct qib_pportdata *dd,
struct list_head *pktlist);
int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
u32, struct qib_verbs_txreq *);
/* ppd->sdma_lock should be locked before calling this. */
......
......@@ -279,7 +279,7 @@ struct qib_base_info {
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
#define QIB_USER_SWMINOR 12
#define QIB_USER_SWMINOR 13
#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
......@@ -701,7 +701,37 @@ struct qib_message_header {
__be32 bth[3];
/* fields below this point are in host byte order */
struct qib_header iph;
/* fields below are simplified, but should match PSM */
/* some are accessed by driver when packet splitting is needed */
__u8 sub_opcode;
__u8 flags;
__u16 commidx;
__u32 ack_seq_num;
__u8 flowid;
__u8 hdr_dlen;
__u16 mqhdr;
__u32 uwords[4];
};
/*
 * Sequence number bits for a message.
 *
 * Three overlapping views of the same 32-bit value (the anonymous
 * bit-field structs alias the low 24 bits of 'val'):
 *  - per-flow view:  11-bit sequence, 8-bit generation, 5-bit flow id
 *  - per-message view: 16-bit packet index, 8-bit message index
 *  - raw view: the whole 32-bit word
 *
 * NOTE(review): this layout appears to be shared with userspace (PSM),
 * so field widths and ordering must not change — confirm against the
 * matching userspace definition before modifying.
 */
union qib_seqnum {
	struct {
		__u32 seq:11;
		__u32 gen:8;
		__u32 flow:5;
	};
	struct {
		__u32 pkt:16;
		__u32 msg:8;
	};
	__u32 val;
};
/*
 * qib receive-DMA TID session member.
 *
 * Describes one TID entry of a receive session: the TID index plus the
 * offset and length (presumably in bytes within the TID buffer — TODO
 * confirm units against the userspace/PSM counterpart).  Fixed-width
 * __u16 fields suggest this is part of the user ABI; do not repack.
 */
struct qib_tid_session_member {
	__u16 tid;
	__u16 offset;
	__u16 length;
};
/* IB - LRH header consts */
......
......@@ -1220,7 +1220,7 @@ static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
return user_swminor == 3;
default:
/* >= 4 are compatible (or are expected to be) */
return user_swminor >= 4;
return user_swminor <= QIB_USER_SWMINOR;
}
}
/* make no promises yet for future major versions */
......
......@@ -423,8 +423,11 @@ void qib_sdma_intr(struct qib_pportdata *ppd)
void __qib_sdma_intr(struct qib_pportdata *ppd)
{
if (__qib_sdma_running(ppd))
if (__qib_sdma_running(ppd)) {
qib_sdma_make_progress(ppd);
if (!list_empty(&ppd->sdma_userpending))
qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
}
}
int qib_setup_sdma(struct qib_pportdata *ppd)
......@@ -452,6 +455,9 @@ int qib_setup_sdma(struct qib_pportdata *ppd)
ppd->sdma_descq_removed = 0;
ppd->sdma_descq_added = 0;
ppd->sdma_intrequest = 0;
INIT_LIST_HEAD(&ppd->sdma_userpending);
INIT_LIST_HEAD(&ppd->sdma_activelist);
tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册