Commit 6332dee8 authored by Adit Ranadive, committed by Doug Ledford

RDMA/vmw_pvrdma: Cleanup unused variables

Removed the unused nreq and redundant index variables.
Moved the hardcoded number of async and CQ ring pages into a macro.
Reported-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Tested-by: Andrew Boyer <andrew.boyer@dell.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent: cb886455
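The heart of the cleanup is in pvrdma_post_send()/pvrdma_post_recv() below:
pvrdma_idx_ring_has_space() already reports the tail slot it checked through an
output parameter, so the caller-maintained index (and the never-read nreq
counter) add nothing. A minimal user-space sketch of the pattern, with
hypothetical ring_space()/write_wqe() helpers standing in for the driver's ring
check and WQE setup (assumed semantics, not the driver's actual code):

#include <stdbool.h>
#include <stddef.h>

struct wr { struct wr *next; };
struct ring { unsigned int prod_tail; };

/* Stand-in for pvrdma_idx_ring_has_space(): reports the tail slot the
 * next WQE would occupy, so the caller needs no index of its own. */
static bool ring_space(struct ring *r, unsigned int cnt, unsigned int *tail)
{
	*tail = r->prod_tail % cnt;	/* slot for the next WQE */
	return true;			/* full-ring check elided in this sketch */
}

static void write_wqe(unsigned int slot, const struct wr *wr)
{
	(void)slot; (void)wr;		/* WQE field setup elided */
}

static int post_all(struct ring *r, unsigned int cnt, struct wr *wr)
{
	while (wr) {			/* was: for (nreq = 0; wr; nreq++, ...) */
		unsigned int tail = 0;

		if (!ring_space(r, cnt, &tail))
			return -1;
		write_wqe(tail, wr);	/* was: get_sq_wqe(qp, index) */
		r->prod_tail++;		/* was: index++ plus manual wrap */
		wr = wr->next;
	}
	return 0;
}

int main(void)
{
	struct wr second = { NULL }, first = { &second };
	struct ring r = { 0 };

	return post_all(&r, 4, &first);	/* posts two chained WRs */
}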
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -69,6 +69,8 @@
  */
 #define PCI_DEVICE_ID_VMWARE_PVRDMA	0x0820
 
+#define PVRDMA_NUM_RING_PAGES		4
+
 struct pvrdma_dev;
 
 struct pvrdma_page_dir {
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -858,7 +858,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	dev->dsr->resp_slot_dma = (u64)slot_dma;
 
 	/* Async event ring */
-	dev->dsr->async_ring_pages.num_pages = 4;
+	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
 	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
 				   dev->dsr->async_ring_pages.num_pages, true);
 	if (ret)
@@ -867,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
 
 	/* CQ notification ring */
-	dev->dsr->cq_ring_pages.num_pages = 4;
+	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
 	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
 				   dev->dsr->cq_ring_pages.num_pages, true);
 	if (ret)
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -554,13 +554,13 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	return ret;
 }
 
-static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
 	return pvrdma_page_dir_get_ptr(&qp->pdir,
 				       qp->sq.offset + n * qp->sq.wqe_size);
 }
 
-static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
 	return pvrdma_page_dir_get_ptr(&qp->pdir,
 				       qp->rq.offset + n * qp->rq.wqe_size);
@@ -598,9 +598,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned long flags;
 	struct pvrdma_sq_wqe_hdr *wqe_hdr;
 	struct pvrdma_sge *sge;
-	int i, index;
-	int nreq;
-	int ret;
+	int i, ret;
 
 	/*
 	 * In states lower than RTS, we can fail immediately. In other states,
@@ -613,9 +611,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
 
-	index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
-	for (nreq = 0; wr; nreq++, wr = wr->next) {
-		unsigned int tail;
+	while (wr) {
+		unsigned int tail = 0;
 
 		if (unlikely(!pvrdma_idx_ring_has_space(
 				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
@@ -680,7 +677,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 		}
 
-		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
 		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
 		wqe_hdr->wr_id = wr->wr_id;
 		wqe_hdr->num_sge = wr->num_sge;
@@ -771,12 +768,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		/* Make sure wqe is written before index update */
 		smp_wmb();
 
-		index++;
-		if (unlikely(index >= qp->sq.wqe_cnt))
-			index = 0;
 		/* Update shared sq ring */
 		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
 				    qp->sq.wqe_cnt);
+
+		wr = wr->next;
 	}
 
 	ret = 0;
@@ -806,7 +802,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	struct pvrdma_qp *qp = to_vqp(ibqp);
 	struct pvrdma_rq_wqe_hdr *wqe_hdr;
 	struct pvrdma_sge *sge;
-	int index, nreq;
 	int ret = 0;
 	int i;
 
@@ -821,9 +816,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	spin_lock_irqsave(&qp->rq.lock, flags);
 
-	index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
-	for (nreq = 0; wr; nreq++, wr = wr->next) {
-		unsigned int tail;
+	while (wr) {
+		unsigned int tail = 0;
 
 		if (unlikely(wr->num_sge > qp->rq.max_sg ||
 			     wr->num_sge < 0)) {
@@ -843,7 +837,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			goto out;
 		}
 
-		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
 		wqe_hdr->wr_id = wr->wr_id;
 		wqe_hdr->num_sge = wr->num_sge;
 		wqe_hdr->total_len = 0;
@@ -859,12 +853,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		/* Make sure wqe is written before index update */
 		smp_wmb();
 
-		index++;
-		if (unlikely(index >= qp->rq.wqe_cnt))
-			index = 0;
 		/* Update shared rq ring */
 		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
 				    qp->rq.wqe_cnt);
+
+		wr = wr->next;
 	}
 
 	spin_unlock_irqrestore(&qp->rq.lock, flags);
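For reference, why the explicit wrap (index++; if (index >= qp->sq.wqe_cnt)
index = 0;) could be deleted: the tail reported by the ring-space check is
already reduced modulo wqe_cnt, and pvrdma_idx_ring_inc() advances the shared
producer counter on its own. A toy model of that arithmetic (assumed semantics
based only on the calls visible in this diff, not the driver's actual ring
code):

#include <stdio.h>

/* Toy ring: a free-running producer counter; the slot index is always
 * derived as counter % element_count, so callers never wrap by hand. */
struct ring { unsigned int prod_tail; };

static unsigned int ring_idx(const struct ring *r, unsigned int cnt)
{
	return r->prod_tail % cnt;	/* plays the role of the checked tail */
}

static void ring_inc(struct ring *r)
{
	r->prod_tail++;			/* plays the role of pvrdma_idx_ring_inc() */
}

int main(void)
{
	struct ring r = { 0 };
	const unsigned int wqe_cnt = 4;

	/* Six posts wrap the 4-slot ring without any explicit index reset. */
	for (unsigned int i = 0; i < 6; i++) {
		printf("WR %u -> slot %u\n", i, ring_idx(&r, wqe_cnt));
		ring_inc(&r);
	}
	return 0;
}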