提交 5fdca653 编写于 作者: C Chuck Lever 提交者: J. Bruce Fields

svcrdma: Renovate sendto chunk list parsing

The current sendto code appears to support clients that provide only
one of a Read list, a Write list, or a Reply chunk. My reading of
that code is that it doesn't support the following cases:

 - Read list + Write list
 - Read list + Reply chunk
 - Write list + Reply chunk
 - Read list + Write list + Reply chunk

The protocol allows more than one Read or Write chunk in those
lists. Some clients do send a Read list and Reply chunk
simultaneously. NFSv4 WRITE uses a Read list for the data payload,
and a Reply chunk because the GETATTR result in the reply can
contain a large object like an ACL.

Generalize one of the sendto code paths needed to support all of
the above cases, and attempt to ensure that only one pass is done
through the RPC Call's transport header to gather chunk list
information for building the reply.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
上级 4d712ef1
...@@ -236,8 +236,6 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *, ...@@ -236,8 +236,6 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
struct svc_rdma_req_map *, bool); struct svc_rdma_req_map *, bool);
extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_sendto(struct svc_rqst *);
extern struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *);
extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
int); int);
......
...@@ -415,6 +415,20 @@ rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head, ...@@ -415,6 +415,20 @@ rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
return 1; return 1;
} }
/* Locate the Read list in the RPC Call's transport header.
 *
 * Returns a pointer to the first read chunk, or NULL when the
 * Read list is empty (its discriminator word is zero).
 */
static struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *first;

	/* The chunk lists begin immediately after the fixed header. */
	first = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	if (first->rc_discrim != xdr_zero)
		return first;
	return NULL;
}
static int rdma_read_chunks(struct svcxprt_rdma *xprt, static int rdma_read_chunks(struct svcxprt_rdma *xprt,
struct rpcrdma_msg *rmsgp, struct rpcrdma_msg *rmsgp,
struct svc_rqst *rqstp, struct svc_rqst *rqstp,
......
...@@ -153,76 +153,35 @@ static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt, ...@@ -153,76 +153,35 @@ static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
return dma_addr; return dma_addr;
} }
/* Returns the address of the first read chunk or <nul> if no read chunk /* Parse the RPC Call's transport header.
* is present
*/ */
struct rpcrdma_read_chunk * static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp,
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp) struct rpcrdma_write_array **write,
struct rpcrdma_write_array **reply)
{ {
struct rpcrdma_read_chunk *ch = __be32 *p;
(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
if (ch->rc_discrim == xdr_zero) p = (__be32 *)&rmsgp->rm_body.rm_chunks[0];
return NULL;
return ch;
}
/* Returns the address of the first read write array element or <nul>
* if no write array list is present
*/
static struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
rmsgp->rm_body.rm_chunks[1] == xdr_zero)
return NULL;
return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}
/* Returns the address of the first reply array element or <nul> if no /* Read list */
* reply array is present while (*p++ != xdr_zero)
*/ p += 5;
static struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
struct rpcrdma_write_array *wr_ary)
{
struct rpcrdma_read_chunk *rch;
struct rpcrdma_write_array *rp_ary;
/* XXX: Need to fix when reply chunk may occur with read list /* Write list */
* and/or write list. if (*p != xdr_zero) {
*/ *write = (struct rpcrdma_write_array *)p;
if (rmsgp->rm_body.rm_chunks[0] != xdr_zero || while (*p++ != xdr_zero)
rmsgp->rm_body.rm_chunks[1] != xdr_zero) p += 1 + be32_to_cpu(*p) * 4;
return NULL; } else {
*write = NULL;
rch = svc_rdma_get_read_chunk(rmsgp); p++;
if (rch) {
while (rch->rc_discrim != xdr_zero)
rch++;
/* The reply chunk follows an empty write array located
* at 'rc_position' here. The reply array is at rc_target.
*/
rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
goto found_it;
}
if (wr_ary) {
int chunk = be32_to_cpu(wr_ary->wc_nchunks);
rp_ary = (struct rpcrdma_write_array *)
&wr_ary->wc_array[chunk].wc_target.rs_length;
goto found_it;
} }
/* No read list, no write list */ /* Reply chunk */
rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2]; if (*p != xdr_zero)
*reply = (struct rpcrdma_write_array *)p;
found_it: else
if (rp_ary->wc_discrim == xdr_zero) *reply = NULL;
return NULL;
return rp_ary;
} }
/* RPC-over-RDMA Version One private extension: Remote Invalidation. /* RPC-over-RDMA Version One private extension: Remote Invalidation.
...@@ -244,8 +203,8 @@ static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp, ...@@ -244,8 +203,8 @@ static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
inv_rkey = 0; inv_rkey = 0;
rd_ary = svc_rdma_get_read_chunk(rdma_argp); rd_ary = (struct rpcrdma_read_chunk *)&rdma_argp->rm_body.rm_chunks[0];
if (rd_ary) { if (rd_ary->rc_discrim != xdr_zero) {
inv_rkey = be32_to_cpu(rd_ary->rc_target.rs_handle); inv_rkey = be32_to_cpu(rd_ary->rc_target.rs_handle);
goto out; goto out;
} }
...@@ -622,8 +581,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) ...@@ -622,8 +581,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
* places this at the start of page 0. * places this at the start of page 0.
*/ */
rdma_argp = page_address(rqstp->rq_pages[0]); rdma_argp = page_address(rqstp->rq_pages[0]);
wr_ary = svc_rdma_get_write_array(rdma_argp); svc_rdma_get_write_arrays(rdma_argp, &wr_ary, &rp_ary);
rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);
inv_rkey = 0; inv_rkey = 0;
if (rdma->sc_snd_w_inv) if (rdma->sc_snd_w_inv)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册