/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

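/* Return the number of pad bytes needed to round @len up to the
 * next XDR (four-byte) boundary.
 */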
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

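/* Build an iovec array describing where each portion of the reply's
 * xdr_buf lives. sge[0] is reserved for the transport header; the
 * head, page list, and tail follow. When a Write chunk is present,
 * the XDR pad bytes at the front of the tail are skipped, since the
 * pad for write-chunk data is not sent inline.
 */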
int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
		     struct xdr_buf *xdr,
		     struct svc_rdma_req_map *vec,
		     bool write_chunk_present)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: %s: XDR buffer length error\n", __func__);
		return -EIO;
	}

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		unsigned char *base = xdr->tail[0].iov_base;
		size_t len = xdr->tail[0].iov_len;
		u32 xdr_pad = xdr_padsize(xdr->page_len);

		if (write_chunk_present && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		if (len) {
			vec->sge[sge_no].iov_base = base;
			vec->sge[sge_no].iov_len = len;
			sge_no++;
		}
	}

	dprintk("svcrdma: %s: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		__func__, sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

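/* DMA-map up to one page's worth of the xdr_buf for sending.
 * @xdr_off is an offset into the whole xdr_buf; the byte at that
 * offset may live in the head, the page list, or the tail.
 */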
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;

	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

/* Parse the RPC Call's transport header to locate its Write list and
 * Reply chunk, if present. Read list entries are skipped.
 */
static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp,
				      struct rpcrdma_write_array **write,
				      struct rpcrdma_write_array **reply)
{
	__be32 *p;

	p = (__be32 *)&rmsgp->rm_body.rm_chunks[0];

	/* Read list */
	while (*p++ != xdr_zero)
		p += 5;

	/* Write list */
	if (*p != xdr_zero) {
		*write = (struct rpcrdma_write_array *)p;
		while (*p++ != xdr_zero)
			p += 1 + be32_to_cpu(*p) * 4;
	} else {
		*write = NULL;
		p++;
	}

	/* Reply chunk */
	if (*p != xdr_zero)
		*reply = (struct rpcrdma_write_array *)p;
	else
		*reply = NULL;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one rkey to invalidate.
 *
 * Find a candidate rkey to invalidate when sending a reply.  Picks the
 * first rkey it finds in the chunks lists.
 *
 * Returns zero if RPC's chunk lists are empty.
 */
static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
				 struct rpcrdma_write_array *wr_ary,
				 struct rpcrdma_write_array *rp_ary)
{
	struct rpcrdma_read_chunk *rd_ary;
	struct rpcrdma_segment *arg_ch;
	u32 inv_rkey;

	inv_rkey = 0;

	rd_ary = (struct rpcrdma_read_chunk *)&rdma_argp->rm_body.rm_chunks[0];
	if (rd_ary->rc_discrim != xdr_zero) {
		inv_rkey = be32_to_cpu(rd_ary->rc_target.rs_handle);
		goto out;
	}

	if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) {
		arg_ch = &wr_ary->wc_array[0].wc_target;
		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
		goto out;
	}

	if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) {
		arg_ch = &rp_ary->wc_array[0].wc_target;
		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
		goto out;
	}

out:
	dprintk("svcrdma: Send With Invalidate rkey=%08x\n", inv_rkey);
	return inv_rkey;
}

/* Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
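 *
 * Returns the number of bytes posted for RDMA Write, or -EIO.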
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_rdma_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
				  bc, vec->sge[xdr_sge_no].iov_len - sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		svc_rdma_count_mappings(xprt, ctxt);
		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->cqe.done = svc_rdma_wc_write;
	write_wr.wr.wr_cqe = &ctxt->cqe;
	write_wr.wr.sg_list = &sge[0];
	write_wr.wr.num_sge = sge_no;
	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.send_flags = IB_SEND_SIGNALED;
	write_wr.rkey = rmr;
	write_wr.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr.wr))
		goto err;
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return -EIO;
}

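/* Send the xdr_buf's page list using the client-provided Write
 * chunks, and encode the response's Write list to reflect the number
 * of bytes consumed in each chunk. Returns the length of the page
 * list on success, or -EIO on failure.
 */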
noinline
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *wr_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	int nchunks;
	struct rpcrdma_write_array *res_ary;
	int ret;

	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len;

out_err:
	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
	return -EIO;
}

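/* Send the complete RPC reply using the client-provided Reply chunk,
 * and encode the response's reply array to reflect the number of
 * bytes written. Returns the reply's total length on success, or
 * -EIO on failure.
 */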
noinline
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *rp_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *res_ary;
	int ret;

	/* XXX: need to fix when reply lists occur with read-list and/or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &rp_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;

out_err:
	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
	return -EIO;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND. NB: The offset of the payload
 * to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_req_map *vec,
		      int byte_count,
		      u32 inv_rkey)
{
	struct svc_rdma_op_ctxt *ctxt;
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret = -EIO;

	/* Prepare the context */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	svc_rdma_count_mappings(rdma, ctxt);

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		svc_rdma_count_mappings(rdma, ctxt);
		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	if (sge_no > rdma->sc_max_sge) {
		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
		goto err;
	}
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->cqe.done = svc_rdma_wc_send;
	send_wr.wr_cqe = &ctxt->cqe;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	if (inv_rkey) {
		send_wr.opcode = IB_WR_SEND_WITH_INV;
		send_wr.ex.invalidate_rkey = inv_rkey;
	} else
		send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags =  IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return ret;
}

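/* svcrdma assembles the transport header itself in svc_rdma_sendto(),
 * so there is no reply-header preparation to do here.
 */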
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

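/* Transport-specific sendto: push payload data with RDMA Write as
 * directed by the Write list and Reply chunk, then post an RDMA Send
 * carrying the transport header and any inline content.
 */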
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *wr_ary, *rp_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_req_map *vec;
	u32 inv_rkey;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. The receive logic always
	 * places this at the start of page 0.
	 */
	rdma_argp = page_address(rqstp->rq_pages[0]);
	svc_rdma_get_write_arrays(rdma_argp, &wr_ary, &rp_ary);

	inv_rkey = 0;
	if (rdma->sc_snd_w_inv)
		inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_ary, rp_ary);

	/* Build a req vec for the XDR */
	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	res_page = alloc_page(GFP_KERNEL);
	if (!res_page)
		goto err0;
	rdma_resp = page_address(res_page);
	if (rp_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	if (wr_ary) {
		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret + xdr_padsize(ret);
	}

	/* Send any reply-list data and update resp reply-list */
	if (rp_ary) {
		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret;
	}

	/* Post a fresh Receive buffer _before_ sending the reply */
	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
	if (ret)
		goto err1;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
			 inline_bytes, inv_rkey);
	if (ret < 0)
		goto err0;

	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(rdma, vec);
	pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n",
	       ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	return -ENOTCONN;
}

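/* Send a transport-level error in response to a Call that cannot be
 * processed: ERR_VERS for an unsupported protocol version, otherwise
 * ERR_CHUNK.
 */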
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 int status)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	enum rpcrdma_errcode err;
	__be32 *va;
	int length;
	int ret;

	ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
	if (ret)
		return;

	p = alloc_page(GFP_KERNEL);
	if (!p)
		return;
	va = page_address(p);

	/* XDR encode an error reply */
	err = ERR_CHUNK;
	if (status == -EPROTONOSUPPORT)
		err = ERR_VERS;
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = length;
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		dprintk("svcrdma: Error mapping buffer for protocol error\n");
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	svc_rdma_count_mappings(xprt, ctxt);

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof(err_wr));
	ctxt->cqe.done = svc_rdma_wc_send;
	err_wr.wr_cqe = &ctxt->cqe;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}