Commit 0e7f011a authored by Tom Tucker

svcrdma: Simplify receive buffer posting

The svcrdma transport provider currently allocates receive buffers
to the RQ through the xpo_release_rqst method. This approach is overly
complicated since it means that the rqstp rq_xprt_ctxt has to be
selectively set based on whether the RPC is going to be processed
immediately or deferred. Instead, just post the receive buffer when
we are certain that we are replying in the send_reply function.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Parent aa3314c8
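For readers skimming the diff, the bookkeeping change can be modeled in plain userspace C. The sketch below is purely illustrative (none of these names come from the patch; post_recv only mimics svc_rdma_post_recv): it contrasts the old scheme, where each request carried a flag recording whether it had consumed an RQ credit, with the new scheme of unconditionally posting a fresh receive buffer at reply time.

    #include <stdio.h>

    /* Toy model of the RDMA receive queue: each incoming request consumes one
     * posted buffer (an "RQ credit"); the server must post a replacement so
     * the client never runs out of credits. */
    static int rq_posted = 4;            /* buffers currently posted */

    static void post_recv(void)          /* stands in for svc_rdma_post_recv() */
    {
        rq_posted++;
    }

    /* Old scheme: the release path checks a per-request flag and reposts only
     * if this particular request really consumed a credit. */
    static void release_rqst_old(int consumed_credit)
    {
        if (consumed_credit)
            post_recv();
    }

    /* New scheme: the reply path always reposts, so no per-request flag is
     * needed and the release callback has nothing left to do. */
    static void send_reply_new(void)
    {
        post_recv();                     /* replenish before sending the reply */
        /* ... build and post the reply itself ... */
    }

    int main(void)
    {
        rq_posted--;                     /* a request arrives, consuming a credit */
        send_reply_new();                /* the credit comes back at reply time */
        release_rqst_old(0);             /* old path shown only for contrast */
        printf("posted receive buffers: %d\n", rq_posted);
        return 0;
    }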
@@ -457,8 +457,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
         ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
         rqstp->rq_arg.head[0].iov_len);
-    /* Indicate that we've consumed an RQ credit */
-    rqstp->rq_xprt_ctxt = rqstp->rq_xprt;
     svc_xprt_received(rqstp->rq_xprt);
     return ret;
 }
@@ -480,13 +478,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
     dprintk("svcrdma: rqstp=%p\n", rqstp);
-    /*
-     * The rq_xprt_ctxt indicates if we've consumed an RQ credit
-     * or not. It is used in the rdma xpo_release_rqst function to
-     * determine whether or not to return an RQ WQE to the RQ.
-     */
-    rqstp->rq_xprt_ctxt = NULL;
     spin_lock_bh(&rdma_xprt->sc_read_complete_lock);
     if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
         ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
@@ -550,9 +541,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
         return 0;
     }
-    /* Indicate we've consumed an RQ credit */
-    rqstp->rq_xprt_ctxt = rqstp->rq_xprt;
     ret = rqstp->rq_arg.head[0].iov_len
         + rqstp->rq_arg.page_len
         + rqstp->rq_arg.tail[0].iov_len;
@@ -569,11 +557,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
     return ret;
 close_out:
-    if (ctxt) {
+    if (ctxt)
         svc_rdma_put_context(ctxt, 1);
-        /* Indicate we've consumed an RQ credit */
-        rqstp->rq_xprt_ctxt = rqstp->rq_xprt;
-    }
     dprintk("svcrdma: transport %p is closing\n", xprt);
     /*
      * Set the close bit and enqueue it. svc_recv will see the
@@ -389,6 +389,16 @@ static int send_reply(struct svcxprt_rdma *rdma,
     int page_no;
     int ret;
+    /* Post a recv buffer to handle another request. */
+    ret = svc_rdma_post_recv(rdma);
+    if (ret) {
+        printk(KERN_INFO
+               "svcrdma: could not post a receive buffer, err=%d."
+               "Closing transport %p.\n", ret, rdma);
+        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
+        return 0;
+    }
     /* Prepare the context */
     ctxt->pages[0] = page;
     ctxt->count = 1;
@@ -910,27 +910,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
     return NULL;
 }
-/*
- * Post an RQ WQE to the RQ when the rqst is being released. This
- * effectively returns an RQ credit to the client. The rq_xprt_ctxt
- * will be null if the request is deferred due to an RDMA_READ or the
- * transport had no data ready (EAGAIN). Note that an RPC deferred in
- * svc_process will still return the credit, this is because the data
- * is copied and no longer consume a WQE/WC.
- */
 static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
 {
-    int err;
-    struct svcxprt_rdma *rdma =
-        container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
-    if (rqstp->rq_xprt_ctxt) {
-        BUG_ON(rqstp->rq_xprt_ctxt != rdma);
-        err = svc_rdma_post_recv(rdma);
-        if (err)
-            dprintk("svcrdma: failed to post an RQ WQE error=%d\n",
-                err);
-    }
-    rqstp->rq_xprt_ctxt = NULL;
 }
 /*
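Net effect of the last hunk: with receive buffers now replenished from send_reply, the release callback keeps its signature but is left with an empty body. A sketch inferred from the hunk counts above (kernel context assumed, not a verbatim copy of the resulting tree):

    static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
    {
        /* Nothing to do: receive buffers are reposted in send_reply(). */
    }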