/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("rpcrdma: %s on device %s ep %p\n",
	       ib_event_msg(event->event), event->device->name, context);

	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);

	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(rep, wc);
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
	rpcrdma_reply_handler(rep);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
	goto out_schedule;
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}
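
/* Example (illustrative): with no valid private message from the peer,
 * rsize and wsize stay at RPCRDMA_V1_DEF_INLINE_SIZE, so a larger
 * configured inline_rsize/inline_wsize is clamped down to the V1
 * default. A peer that advertises, say, a 4096-byte receive buffer
 * lets this transport keep an inline_wsize of up to 4096 bytes.
 */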

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(&xprt->rx_xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_pd = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			rdma_reject_msg(id, event->status));
		connstate = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			connstate = -EAGAIN;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
connected:
		xprt->rx_buf.rb_credits = 1;
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %s:%s on %s/%s (ep 0x%p): %s\n",
			__func__,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			ia->ri_device->name, ia->ri_ops->ro_displayname,
			ep, rdma_event_msg(event->event));
		break;
	}

	return 0;
}
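
/* Summary of the connstate values set above:
 *
 *	ESTABLISHED	->  1
 *	CONNECT_ERROR	-> -ENOTCONN
 *	UNREACHABLE	-> -ENETDOWN
 *	REJECTED	-> -ECONNREFUSED (or -EAGAIN for a stale connection)
 *	DISCONNECTED	-> -ECONNABORTED
 *
 * rpcrdma_ep_connect() sleeps on rep_connect_wait until one of these
 * values is stored in ep->rep_connected.
 */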

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC:       %s: wait() exited: %i\n",
			__func__, rc);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC:       %s: wait() exited: %i\n",
			__func__, rc);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		ib_drain_qp(ia->ri_id->qp);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	unsigned int max_qp_wr, max_sge;
	struct ib_cq *sendcq, *recvcq;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC:       %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;	/* drain cqe */
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     1, IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						ia->ri_device->attrs.max_qp_rd_atom;

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	pr_info("%s: r_xprt = %p\n", __func__, r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	dprintk("RPC:       %s: reconnecting...\n", __func__);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		dprintk("RPC:       %s: rdma_create_qp returned %d\n",
			__func__, err);
		goto out_destroy;
	}

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	unsigned int extras;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
				__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);
	extras = r_xprt->rx_buf.rb_bc_srv_max_requests;
	if (extras)
		rpcrdma_ep_post_extra_recv(r_xprt, extras);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
							ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}

	ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
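
/* A rough picture of the queue discipline (illustrative):
 *
 *	rb_sc_head - advanced by the consumer when a sendctx is
 *		     dequeued for a Send operation
 *	rb_sc_tail - advanced by the producer as Send completions
 *		     return sendctxs to the queue
 *
 * The consumer refuses to advance rb_sc_head onto rb_sc_tail, so
 * ->send_request backs off before the Send Queue can overflow.
 */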

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			goto out_destroy;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;

out_destroy:
	rpcrdma_sendctxs_destroy(buf);
	return -ENOMEM;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
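
/* Example of the wraparound above (illustrative): with rb_sc_last == 5,
 * successive calls yield 3 -> 4 -> 5 -> 0 -> 1, which is equivalent to
 * (item + 1) % (rb_sc_last + 1) without the division.
 */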

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	dprintk("RPC:       %s: empty sendctx queue\n", __func__);
	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}
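
/* Typical caller pattern (illustrative sketch only):
 *
 *	sc = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
 *	if (!sc)
 *		return -EAGAIN;		(caller pauses, then retries)
 *	req->rl_sendctx = sc;
 */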

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);
}
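
/* Note on ordering: the smp_store_release() above pairs with the
 * READ_ONCE() of rb_sc_tail in rpcrdma_sendctx_get_locked(), so a
 * consumer that observes the new tail also observes the unmapped
 * state of every sendctx the producer walked past.
 */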

static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
		spin_unlock(&buf->rb_recovery_lock);

		dprintk("RPC:       %s: recovering MR %p\n", __func__, mr);
		mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 32; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);

	dprintk("RPC:       %s: created %u MRs\n", __func__, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_buffer = &r_xprt->rx_buf;
	INIT_LIST_HEAD(&req->rl_registered);
	return req;
}

/**
 * rpcrdma_create_rep - Allocate an rpcrdma_rep object
 * @r_xprt: controlling transport
 *
 * Returns 0 on success or a negative errno on failure.
 */
int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}
	xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return 0;

out_free:
	kfree(rep);
out:
	dprintk("RPC:       %s: reply buffer %d alloc failed\n",
		__func__, rc);
	return rc;
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
	for (i = 0; i <= buf->rb_max_requests; i++) {
		rc = rpcrdma_create_rep(r_xprt);
		if (rc)
			goto out;
	}

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}
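
/* Pool sizing, as set up above: rb_max_requests rpcrdma_reqs,
 * rb_max_requests + 1 rpcrdma_reps (note the "<=" in the second
 * loop), and a sendctx queue sized for rb_max_requests +
 * RPCRDMA_MAX_BC_REQUESTS concurrent Sends.
 */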

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_list);
	list_del_init(&req->rl_list);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);
		ia->ri_ops->ro_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(rep);
	}
	buf->rb_send_count = 0;

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);
	buf->rb_recv_count = 0;

	rpcrdma_mrs_destroy(buf);
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	dprintk("RPC:       %s: no MRs available\n", __func__);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}
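
/* Typical pairing (illustrative sketch only):
 *
 *	mr = rpcrdma_mr_get(r_xprt);
 *	if (!mr)
 *		return ERR_PTR(-ENOBUFS);	(caller retries later)
 *	... register a memory region via ia->ri_ops ...
 *	rpcrdma_mr_unmap_and_put(mr);
 */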

static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_dma_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
{
	/* If an RPC previously completed without a reply (say, a
	 * credential problem or a soft timeout occurs) then hold off
	 * on supplying more Receive buffers until the number of new
	 * pending RPCs catches up to the number of posted Receives.
	 */
	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
		return NULL;

	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
		return NULL;
	buffers->rb_recv_count++;
	return rpcrdma_buffer_get_rep_locked(buffers);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	buffers->rb_send_count++;
	req = rpcrdma_buffer_get_req_locked(buffers);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC:       %s: out of request buffers\n", __func__);
	return NULL;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	buffers->rb_send_count--;
	list_add_tail(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		buffers->rb_recv_count--;
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	}
	spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	buffers->rb_recv_count--;
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}
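
/* Regbuf lifecycle (illustrative sketch): allocation does not map the
 * buffer; mapping is deferred until the regbuf is first used.
 *
 *	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	...
 *	rpcrdma_dma_map_regbuf(ia, rb);		(maps on demand)
 *	...
 *	rpcrdma_free_regbuf(rb);		(unmaps and frees)
 */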

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = ia->ri_device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb)))
		return false;

	rb->rg_device = device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	struct ib_send_wr *send_wr_fail;
	int rc;

	if (req->rl_reply) {
		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
		if (rc)
			return rc;
		req->rl_reply = NULL;
	}

	if (!ep->rep_send_count ||
	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}
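
/* Send-completion batching, as implemented above (illustrative
 * numbers): with cdata->max_requests == 128, rep_send_batch is
 * min(RPCRDMA_MAX_SEND_BATCH, 32), so only about one Send in every
 * rep_send_batch is signaled. The SGEs of unsignaled Sends are
 * unmapped later by rpcrdma_sendctx_put_locked() when a subsequent
 * signaled completion arrives.
 */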

int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr *recv_wr_fail;
	int rc;

	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
		goto out_map;
	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
	trace_xprtrdma_post_recv(rep, rc);
	if (rc)
		return -ENOTCONN;
	return 0;

out_map:
	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
	return -EIO;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("%s: no extra receive buffers\n", __func__);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}