/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol. It handles
 * marshaling and unmarshaling of RPC/RDMA headers, and is where the
 * interface to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Convert the passed-in xdr_buf into RPC/RDMA chunk elements. Segments
 * are then coalesced when registered, if possible within the selected
 * memreg mode.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

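	/* Build one mr_seg per page of the xdr_buf page list; only the
	 * first page can start at a non-zero offset.
	 */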
	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			/* Tail remains, but we're out of segments */
			return -EIO;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 *   Assume check against THRESHOLD has been done, and chunks are required.
 *   Assume only encoding one list entry for read|write chunks. The NFSv3
 *     protocol is simple enough to allow this as it only has a single "bulk
 *     result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 *     RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */
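
/* Illustrative example (a restatement of the encoding key above, not
 * additional protocol): a single argument carried in three registered
 * segments at XDR position P goes on the wire as the read chunk list
 *
 *    1 | P H0 L0 O0 | 1 | P H1 L1 O1 | 1 | P H2 L2 O2 | 0
 *
 * where each H/L/O group is the Handle32/Length32/Offset64 of one
 * rpcrdma_mr_seg after registration.
 */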

static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

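	/* Register each range of segments with the transport's memory
	 * registration strategy (ro_map), and encode one chunk element
	 * per registered range.
	 */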
	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg   += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						      &req->rl_segments[pos]);
	return n;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

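	/* destp now points just past the marshaled RPC header in the
	 * pre-registered send buffer; page-list and tail data are pulled
	 * up behind it below so the request becomes a single contiguous
	 * buffer.
	 */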
	dprintk("RPC:       %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

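	/* Copy the page-list data into the send buffer one page at a
	 * time, mapping each page with kmap_atomic; only the first page
	 * can have a non-zero starting offset.
	 */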
	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;
	}
	/* header now contains entire send message */
}

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		rpcrdma_inline_pullup(rqst);

		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
		/* new length after pullup */
		rpclen = rqst->rq_svec[0].iov_len;
		/* Currently we try to not actually use read inline.
		 * Reply chunks have the desirable property that
		 * they land, packed, directly in the target buffers
		 * without headers, so they require no fixup. The
		 * additional RDMA Write op sends the same amount
		 * of data, streams on-the-wire and adds no overhead
		 * on receive. Therefore, we request a reply chunk
		 * for non-writes wherever feasible and efficient.
		 */
		if (wtype == rpcrdma_noch)
			wtype = rpcrdma_replych;
	}

	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
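	/* Walk the HLOO elements of the chunk array, summing the byte
	 * counts the server reports having RDMA'd into our memory.
	 */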
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}

/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

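	/* Scatter whatever remains of the inline reply into the rcv_buf
	 * page list, flushing the dcache for each page written.
	 */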
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

void
rpcrdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep =
		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}

/*
 * Called as a tasklet to do req/reply match and complete a request
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status;
	unsigned long cwnd;
	u32 credits;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN) {
		dprintk("RPC:       %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = rdmab_to_msg(rep->rr_rdmabuf);
	if (headerp->rm_vers != rpcrdma_version) {
		dprintk("RPC:       %s: invalid version %d\n",
			__func__, be32_to_cpu(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, be32_to_cpu(headerp->rm_xid),
			rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			be32_to_cpu(headerp->rm_xid));
		goto repost;
	}

	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
			__func__, rep, req, rqst,
			be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, be32_to_cpu(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	credits = be32_to_cpu(headerp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_max_requests)
		credits = r_xprt->rx_buf.rb_max_requests;

	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}