/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <asm/bitops.h>
#include <linux/module.h> /* try_module_get()/module_put() */

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */

static struct workqueue_struct *rpcrdma_receive_wq;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC:       %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
		event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/* Perform basic sanity checking to avoid using garbage
 * to update the credit grant value.
 */
static void
rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *rmsgp = rdmab_to_msg(rep->rr_rdmabuf);
	struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
	u32 credits;

	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		return;

	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buffer->rb_max_requests)
		credits = buffer->rb_max_requests;

	atomic_set(&buffer->rb_credits, credits);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);

	rpcrdma_update_granted_credits(rep);

out_schedule:
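	/* Note: reply processing is deferred to rpcrdma_reply_handler
	 * (wired up in rpcrdma_create_rep), which runs on the
	 * rpcrdma_receive_wq workqueue; this keeps the work done in the
	 * completion handler itself to a minimum.
	 */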
	queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rep->rr_len = RPCRDMA_BAD_LEN;
	goto out_schedule;
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_reminv_expected = false;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_reminv_expected = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}
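	/* Note: the peer's advertised send size can only lower our inline
	 * receive size, and its advertised receive size can only lower our
	 * inline send size; the negotiated values never grow beyond the
	 * configured defaults.
	 */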

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	pr_info("rpcrdma: max send %u, max recv %u\n",
		cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %sconnected\n",
					__func__, connstate > 0 ? "" : "dis");
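		/* Note: every (dis)connect event resets the credit grant to a
		 * single credit; the server's first reply on the new
		 * connection re-establishes the real grant via
		 * rpcrdma_update_granted_credits().
		 */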
		atomic_set(&xprt->rx_buf.rb_credits, 1);
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
	if (id) {
		module_put(id->device->owner);
		rdma_destroy_id(id);
	}
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

	/* FIXME:
	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
	 * be pinned while there are active NFS/RDMA mounts to prevent
	 * hangs and crashes at umount time.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC:       %s: Failed to get device module\n",
			__func__);
		ia->ri_async_rc = -ENODEV;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto put;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto put;

	return id;
put:
	module_put(id->device->owner);
out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out2;
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Unsupported memory registration mode: %d\n",
		       memreg);
		rc = -EINVAL;
		goto out3;
	}

	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rpcrdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 *   o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rpcrdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_qp_wr;
	int rc;

	if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_SEND_SGES) {
		dprintk("RPC:       %s: insufficient sge's available\n",
			__func__);
		return -ENOMEM;
	}

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC:       %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;	/* drain cqe */
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_SEND_SGES;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;	/* always signal? */
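	/* Note: rep_cqinit sets the Send signaling cadence: rpcrdma_ep_post
	 * marks roughly one Send WR in every rep_cqinit posts as
	 * IB_SEND_SIGNALED (see DECR_CQCOUNT there), so a single signaled
	 * completion retires a batch of earlier unsignaled Sends.
	 */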
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     0, IB_POLL_SOFTIRQ);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_SOFTIRQ);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						ia->ri_device->attrs.max_qp_rd_atom;

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC:       %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		old = ia->ri_id;
		ia->ri_id = id;

		rdma_destroy_qp(old);
		rpcrdma_destroy_id(old);
	} else {
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
				__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone a best-effort.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC:       %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		struct rpcrdma_xprt *r_xprt;
		unsigned int extras;

		dprintk("RPC:       %s: connected\n", __func__);

		r_xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		extras = r_xprt->rx_buf.rb_bc_srv_max_requests;

		if (extras) {
			rc = rpcrdma_ep_post_extra_recv(r_xprt, extras);
			if (rc) {
				pr_warn("%s: rpcrdma_ep_post_extra_recv: %i\n",
					__func__, rc);
				rc = 0;
			}
		}
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
							ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}

	ib_drain_qp(ia->ri_id->qp);
}

static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mw *mw;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mw = list_first_entry(&buf->rb_stale_mrs,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
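		/* Note: rb_recovery_lock is dropped around ->ro_recover_mr
		 * below, since recovering an MR may require sleeping (for
		 * example to deregister and re-allocate it).
		 */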
		spin_unlock(&buf->rb_recovery_lock);

		dprintk("RPC:       %s: recovering MR %p\n", __func__, mw);
		mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	list_add(&mw->mw_list, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

static void
rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 32; count++) {
		struct rpcrdma_mw *mw;
		int rc;

		mw = kzalloc(sizeof(*mw), GFP_KERNEL);
		if (!mw)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mw);
		if (rc) {
			kfree(mw);
			break;
		}

		mw->mw_xprt = r_xprt;

		list_add(&mw->mw_list, &free);
		list_add(&mw->mw_all, &all);
	}

	spin_lock(&buf->rb_mwlock);
	list_splice(&free, &buf->rb_mws);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mwlock);

	dprintk("RPC:       %s: created %u MRs\n", __func__, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_create_mrs(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&req->rl_free);
	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_cqe.done = rpcrdma_wc_send;
	req->rl_buffer = &r_xprt->rx_buf;
	INIT_LIST_HEAD(&req->rl_registered);
	req->rl_send_wr.next = NULL;
	req->rl_send_wr.wr_cqe = &req->rl_cqe;
	req->rl_send_wr.sg_list = req->rl_send_sge;
	req->rl_send_wr.opcode = IB_WR_SEND;
	return req;
}

struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_reply_handler);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	atomic_set(&buf->rb_credits, 1);
	spin_lock_init(&buf->rb_mwlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_create_mrs(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		req->rl_backchannel = false;
		list_add(&req->rl_free, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
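	/* Note: Receive buffers are provisioned beyond rb_max_requests so
	 * that backchannel (server-initiated) calls have reps available in
	 * addition to those consumed by ordinary forward replies.
	 */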
	for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC:       %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		list_add(&rep->rr_list, &buf->rb_recv_bufs);
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_free);
	list_del(&req->rl_free);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mw *mw;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mwlock);
	while (!list_empty(&buf->rb_all)) {
		mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&mw->mw_all);

		spin_unlock(&buf->rb_mwlock);
		ia->ri_ops->ro_release_mr(mw);
		count++;
		spin_lock(&buf->rb_mwlock);
	}
	spin_unlock(&buf->rb_mwlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(rep);
	}
	buf->rb_send_count = 0;

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);
	buf->rb_recv_count = 0;

	rpcrdma_destroy_mrs(buf);
}

struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws)) {
		mw = list_first_entry(&buf->rb_mws,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		goto out_nomws;
	return mw;

out_nomws:
	dprintk("RPC:       %s: no MWs available\n", __func__);
	schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_mwlock);
	list_add_tail(&mw->mw_list, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
{
	/* If an RPC previously completed without a reply (say, a
	 * credential problem or a soft timeout occurs) then hold off
	 * on supplying more Receive buffers until the number of new
	 * pending RPCs catches up to the number of posted Receives.
	 */
	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
		return NULL;

	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
		return NULL;
	buffers->rb_recv_count++;
	return rpcrdma_buffer_get_rep_locked(buffers);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	buffers->rb_send_count++;
	req = rpcrdma_buffer_get_req_locked(buffers);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC:       %s: out of request buffers\n", __func__);
	return NULL;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_send_wr.num_sge = 0;
	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	buffers->rb_send_count--;
	list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
	if (rep) {
		buffers->rb_recv_count--;
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	}
	spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	buffers->rb_recv_count--;
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}
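
/* Note: the typical regbuf lifecycle in this file is allocation at setup
 * time (for example in rpcrdma_create_rep), lazy DMA mapping on first use
 * via rpcrdma_dma_map_regbuf, and rpcrdma_free_regbuf at teardown.
 */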

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(ia->ri_device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb)))
		return false;

	rb->rg_device = ia->ri_device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_send_wr;
	struct ib_send_wr *send_wr_fail;
	int rc;

	if (req->rl_reply) {
		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
		if (rc)
			return rc;
		req->rl_reply = NULL;
	}

	dprintk("RPC:       %s: posting %d s/g entries\n",
		__func__, send_wr->num_sge);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr->send_flags = 0;
	else { /* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
	if (rc)
		goto out_postsend_err;
	return 0;

out_postsend_err:
	pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc);
	return -ENOTCONN;
}

int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr *recv_wr_fail;
	int rc;

	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
		goto out_map;
	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
	if (rc)
		goto out_postrecv;
	return 0;

out_map:
	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
	return -EIO;

out_postrecv:
	pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
	return -ENOTCONN;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("%s: no extra receive buffers\n", __func__);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}