/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Convert the passed-in xdr_buf into a representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */
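
/* For illustration (a sketch, assuming 4 KiB pages): an xdr_buf with
 * a 100-byte head, 8192 bytes of page data starting at page offset
 * 512, and an empty tail converts to four segments: one for the head
 * iovec, then page segments of 3584, 4096, and 512 bytes.
 */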

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

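	/* When pos is zero, the head iovec above is itself part of the
	 * chunk (the 0-position read chunk and reply chunk cases);
	 * otherwise the head travels inline and conversion starts
	 * here, at the page list. */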
	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return -EIO;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 *   Assume check against THRESHOLD has been done, and chunks are required.
 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 *     protocol is simple enough to allow this as it only has a single "bulk
 *     result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 *     RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment; however, they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */

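/* Worked example (illustrative only): a two-segment read chunk list
 * at byte position 36 encodes, per the key above, as the XDR words
 *
 *	1 | 36 H L OO | 1 | 36 H L OO | 0
 *
 * that is, 2 * (1 + 5) + 1 = 13 words, or 52 bytes of chunk-list
 * header. The handles, lengths and offsets come from registration.
 */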
static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	do {
		n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg   += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR) {
		for (pos = 0; nchunks--;)
			pos += rpcrdma_deregister_external(
					&req->rl_segments[pos], r_xprt);
	}
	return n;
}

/*
 * Marshal chunks. This routine returns the header length
 * consumed by marshaling.
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */
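
/* Descriptive note: rl_rtype and rl_wtype were chosen in
 * rpcrdma_marshal_req(); at most one of the two chunk lists is
 * encoded, and "result" passes through unchanged in the no-chunk case.
 */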

ssize_t
rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf);

	if (req->rl_rtype != rpcrdma_noch)
		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, req->rl_rtype);
	else if (req->rl_wtype != rpcrdma_noch)
		result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, req->rl_wtype);
	return result;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
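
/* On return, rq_svec[0] has grown to cover the RPC header plus any
 * page and tail data pulled up behind it, so the whole request can go
 * out as a single inline send. The return value is the pad actually
 * applied (zero when padding is skipped).
 */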
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC:       %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
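	/* Copy the page list in behind the head; kmap_atomic is used
	 * because send pages may live in highmem. */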
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
	return pad;
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */
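
/* A sketch of the common outcomes (details in the comments below):
 * small requests and replies stay entirely inline; a large expected
 * reply is returned via a write or reply chunk; a large request is
 * moved by read chunk(s). Only one chunk list type may be encoded
 * per request.
 */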

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen, padlen;
	ssize_t hdrlen;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		req->rl_wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		req->rl_wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		req->rl_wtype = rpcrdma_writech;
	else
		req->rl_wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		req->rl_rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		req->rl_rtype = rpcrdma_areadch;
	else
		req->rl_rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
		req->rl_wtype = rpcrdma_noch;
	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (req->rl_rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = rdma_msgp;
			headerp->rm_body.rm_padded.rm_align =
				cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
			if (req->rl_wtype != rpcrdma_noch) {
				dprintk("RPC:       %s: invalid chunk list\n",
					__func__);
				return -EIO;
			}
		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (req->rl_wtype == rpcrdma_noch)
				req->rl_wtype = rpcrdma_replych;
		}
	}

	hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
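	/* Resulting send layout, for illustration: iov[0] RPC/RDMA
	 * header, iov[1] RPC message; and, only when padding is in
	 * play, iov[2] zeroed pad and iov[3] trailing write data. */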
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf);
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf);

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = rdmab_lkey(req->rl_sendbuf);

		req->rl_niovs = 4;
	}

	return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
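
/* Illustrative walk: called with *iptrp at the array count of a write
 * chunk list "1 - 2 - HLOO - HLOO - 0" (i.e. pointing at the 2), this
 * returns the sum of the two segment lengths and advances *iptrp just
 * past the terminating zero.
 */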
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/*
 * Scatter inline received data back into provided iov's.
 */
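
/* The copy runs head, then page list, then tail, mirroring
 * rpcrdma_inline_pullup() in reverse; flush_dcache_page() keeps the
 * destination pages coherent on architectures that require it.
 */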
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when an async event that changes the
 * connection state is posted to the connection. All it does at
 * this point is mark the connection up or down; the RPC timers
 * do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}

/*
 * Called as a tasklet to do req/reply matching and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to time out, so the errors are discovered at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int credits, rdmalen, status;
	unsigned long cwnd;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN) {
		dprintk("RPC:       %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = rdmab_to_msg(rep->rr_rdmabuf);
	if (headerp->rm_vers != rpcrdma_version) {
		dprintk("RPC:       %s: invalid version %d\n",
			__func__, be32_to_cpu(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, be32_to_cpu(headerp->rm_xid),
			rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			be32_to_cpu(headerp->rm_xid));
		goto repost;
	}

	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
			__func__, rep, req, rqst,
			be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, be32_to_cpu(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	credits = be32_to_cpu(headerp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_max_requests)
		credits = r_xprt->rx_buf.rb_max_requests;

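	/* The server's credit grant caps the number of in-flight
	 * requests; folding it into xprt->cwnd lets the generic RPC
	 * congestion logic enforce that limit. */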
	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}