// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_error(r_xprt, event);
	pr_err("rpcrdma: %s on device %s ep %p\n",
	       ib_event_msg(event->event), event->device->name, context);

	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);

	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(rep, wc);
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
	rpcrdma_reply_handler(rep);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
	goto out_schedule;
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
	int connstate = 0;

	trace_xprtrdma_conn_upcall(xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(&xprt->rx_xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->rx_xprt.connect_cookie;
		connstate = 1;
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETUNREACH;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			rdma_reject_msg(id, event->status));
		connstate = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			connstate = -EAGAIN;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		++xprt->rx_xprt.connect_cookie;
		connstate = -ECONNABORTED;
connected:
		xprt->rx_buf.rb_credits = 1;
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %s:%s on %s/%s (ep 0x%p): %s\n",
			__func__,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			ia->ri_device->name, ia->ri_ops->ro_displayname,
			ep, rdma_event_msg(event->event));
		break;
	}

	return 0;
}

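/* Address and route resolution are asynchronous: each rdma_resolve_*()
 * call below completes through rpcrdma_conn_upcall(), which records the
 * result in ri_async_rc and signals ri_done. wtimeout is one jiffy
 * longer than RDMA_RESOLVE_TIMEOUT so the CM's own timeout fires first.
 */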
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		ib_drain_qp(ia->ri_id->qp);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	unsigned int max_qp_wr, max_sge;
	struct ib_cq *sendcq, *recvcq;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC:       %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;	/* drain cqe */
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     1, IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		dprintk("RPC:       %s: rdma_create_qp returned %d\n",
			__func__, err);
		goto out_destroy;
	}

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	unsigned int extras;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
				__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);
	extras = r_xprt->rx_buf.rb_bc_srv_max_requests;
	if (extras)
		rpcrdma_ep_post_extra_recv(r_xprt, extras);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
							ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
					       rx_ep), rc);

	ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
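/* Note: rb_sc_head advances only in the consumer (Send) path and
 * rb_sc_tail only in the producer (completion) path. As in a classic
 * ring buffer, one slot is always left unused, so an exhausted free
 * list (next(head) == tail) is distinguishable from a full one
 * without a separate element count.
 */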

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			goto out_destroy;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;

out_destroy:
	rpcrdma_sendctxs_destroy(buf);
	return -ENOMEM;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
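/* For example, with rb_sc_last == 3 the helper below produces the
 * index sequence 0, 1, 2, 3, 0, 1, ... so a compare-and-reset stands
 * in for "(item + 1) % (rb_sc_last + 1)".
 */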
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	dprintk("RPC:       %s: empty sendctx queue\n", __func__);
	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);
}

static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
		spin_unlock(&buf->rb_recovery_lock);

		trace_xprtrdma_recover_mr(mr);
		mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 3; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);
	trace_xprtrdma_createmrs(r_xprt, count);

	xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb)) {
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
	req->rl_buffer = buffer;
	INIT_LIST_HEAD(&req->rl_registered);

	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	return req;
}

/**
 * rpcrdma_create_rep - Allocate an rpcrdma_rep object
 * @r_xprt: controlling transport
 *
 * Returns 0 on success or a negative errno on failure.
 */
int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}
	xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return 0;

out_free:
	kfree(rep);
out:
	dprintk("RPC:       %s: reply buffer %d alloc failed\n",
		__func__, rc);
	return rc;
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
	for (i = 0; i <= buf->rb_max_requests; i++) {
		rc = rpcrdma_create_rep(r_xprt);
		if (rc)
			goto out;
	}

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_list);
	list_del_init(&req->rl_list);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);
		ia->ri_ops->ro_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(rep);
	}
	buf->rb_send_count = 0;

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);
	buf->rb_recv_count = 0;

	rpcrdma_mrs_destroy(buf);
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	trace_xprtrdma_nomrs(r_xprt);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_dma_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
{
	/* If an RPC previously completed without a reply (say, a
	 * credential problem or a soft timeout occurs) then hold off
	 * on supplying more Receive buffers until the number of new
	 * pending RPCs catches up to the number of posted Receives.
	 */
	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
		return NULL;

	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
		return NULL;
	buffers->rb_recv_count++;
	return rpcrdma_buffer_get_rep_locked(buffers);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	buffers->rb_send_count++;
	req = rpcrdma_buffer_get_req_locked(buffers);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);

	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	return NULL;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	buffers->rb_send_count--;
	list_add_tail(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		buffers->rb_recv_count--;
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	}
	spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	buffers->rb_recv_count--;
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}
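/* A minimal usage sketch (error handling elided), following the
 * pattern in rpcrdma_create_req() above:
 *
 *	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
 *				  DMA_TO_DEVICE, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	...
 *	rpcrdma_free_regbuf(rb);
 *
 * The buffer is not DMA-mapped here; rpcrdma_dma_map_regbuf() maps it
 * lazily on first use (see rpcrdma_ep_post_recv() below).
 */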

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = ia->ri_device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb)))
		return false;

	rb->rg_device = device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	int rc;

	if (req->rl_reply) {
		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
		if (rc)
			return rc;
		req->rl_reply = NULL;
	}

	if (!ep->rep_send_count ||
	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = ia->ri_ops->ro_send(ia, req);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}
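/* Note on Send signaling: only every rep_send_batch-th Send WR, or one
 * whose req has RPCRDMA_REQ_F_TX_RESOURCES set, carries IB_SEND_SIGNALED.
 * The resources of the intervening unsignaled Sends are reclaimed by
 * rpcrdma_sendctx_put_locked() when the next signaled completion fires.
 */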

int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr *recv_wr_fail;
	int rc;

	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
		goto out_map;
	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
	trace_xprtrdma_post_recv(rep, rc);
	if (rc)
		return -ENOTCONN;
	return 0;

out_map:
	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
	return -EIO;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	trace_xprtrdma_noreps(r_xprt);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}