// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(ia->ri_id->qp);
	drain_workqueue(buf->rb_completion_wq);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(ia->ri_id->qp);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context.
 */
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_event(r_xprt, event);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep.rep_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_post_recvs(r_xprt, false);
	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_recv_buffer_put(rep);
}

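/* Apply the inline buffer sizes advertised in the peer's RPC-over-RDMA
 * CM private message. An absent or unrecognized message leaves the
 * Version One defaults in place. Advertised sizes can only lower the
 * transport's inline thresholds: for example (illustrative values), a
 * peer advertising a 1024-byte receive buffer caps rep_inline_send at
 * 1024 bytes.
 */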
static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < r_xprt->rx_ep.rep_inline_recv)
		r_xprt->rx_ep.rep_inline_recv = rsize;
	if (wsize < r_xprt->rx_ep.rep_inline_send)
		r_xprt->rx_ep.rep_inline_send = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n", __func__,
		r_xprt->rx_ep.rep_inline_send,
		r_xprt->rx_ep.rep_inline_recv);
	rpcrdma_set_max_header_sizes(r_xprt);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_id->device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		wake_up_all(&ep->rep_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto disconnected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto disconnected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto disconnected;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->rep_connected = -ECONNABORTED;
disconnected:
		xprt_force_disconnect(xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC:       %s: %s:%s on %s/frwr: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_id->device->name, rdma_event_msg(event->event));
	return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia->ri_id->device))
			break;
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		rpcrdma_xprt_drain(r_xprt);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/**
 * rpcrdma_ep_create - Create unconnected endpoint
 * @r_xprt: transport to instantiate
 *
 * Returns zero on success, or a negative errno.
 */
int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	ep->rep_max_requests = xprt_rdma_slot_table_entries;
	ep->rep_inline_send = xprt_rdma_max_inline_write;
	ep->rep_inline_recv = xprt_rdma_max_inline_read;

	max_sge = min_t(unsigned int, ia->ri_id->device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = frwr_open(ia, ep);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	ep->rep_send_batch = ep->rep_max_requests >> 3;
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	ep->rep_receive_count = 0;

	sendcq = ib_alloc_cq(ia->ri_id->device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     ia->ri_id->device->num_comp_vectors > 1 ? 1 : 0,
			     IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_id->device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/**
 * rpcrdma_ep_destroy - Disconnect and destroy endpoint.
 * @r_xprt: transport instance to shut down
 *
 */
void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(r_xprt);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(r_xprt);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_id->device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err)
		goto out_destroy;

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
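	/* The current value of rep_connected selects the path taken:
	 * zero means this is the initial connect, so only a fresh QP is
	 * needed; -ENODEV means the underlying device was removed, so
	 * the IA, endpoint, and MRs are recreated from scratch; any
	 * other value is a normal reconnect on the same device.
	 */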
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;
	xprt_clear_connected(xprt);

	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc)
		goto out;

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/**
 * rpcrdma_ep_disconnect - Disconnect underlying transport
 * @ep: endpoint to disconnect
 * @ia: associated interface adapter
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);
	int rc;

	/* returns without wait if ID is not connected */
	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		wait_event_interruptible(ep->rep_connect_wait,
							ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
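/* The queue is indexed by rb_sc_head (consumer side) and rb_sc_tail
 * (producer side). rpcrdma_sendctx_get_locked() advances rb_sc_head;
 * rpcrdma_sendctx_put_locked() advances rb_sc_tail. The index after
 * rb_sc_last wraps around to zero. No free sendctx remains when
 * advancing rb_sc_head would make it catch up with rb_sc_tail.
 */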

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			return -ENOMEM;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctxt
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
		smp_mb__after_atomic();
		xprt_write_space(&sc->sc_xprt->rx_xprt);
	}
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < ia->ri_max_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = frwr_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);
	trace_xprtrdma_createmrs(r_xprt, count);

	xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (req == NULL)
		goto out1;

	rb = rpcrdma_regbuf_alloc(RPCRDMA_HDRBUF_SIZE, DMA_TO_DEVICE, flags);
	if (!rb)
		goto out2;
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));

	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
	if (!req->rl_sendbuf)
		goto out3;

	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
	if (!req->rl_recvbuf)
		goto out4;

	req->rl_buffer = buffer;
	INIT_LIST_HEAD(&req->rl_registered);
	spin_lock(&buffer->rb_lock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_lock);
	return req;

out4:
	kfree(req->rl_sendbuf);
out3:
	kfree(req->rl_rdmabuf);
out2:
	kfree(req);
out1:
	return NULL;
}

static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (!rep->rr_rdmabuf)
		goto out_free;
	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return true;

out_free:
	kfree(rep);
out:
	return false;
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_flags = 0;
	buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);

	rc = -ENOMEM;
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE,
					 GFP_KERNEL);
		if (!req)
			goto out;
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	buf->rb_credits = 1;
	INIT_LIST_HEAD(&buf->rb_recv_bufs);

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	buf->rb_completion_wq = alloc_workqueue("rpcrdma-%s",
						WQ_MEM_RECLAIM | WQ_HIGHPRI,
						0,
			r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
	if (!buf->rb_completion_wq) {
		rc = -ENOMEM;
		goto out;
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
	kfree(rep);
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * This function assumes that the caller prevents concurrent device
 * unload and transport tear-down.
 */
void
rpcrdma_req_destroy(struct rpcrdma_req *req)
{
	list_del(&req->rl_all);

	rpcrdma_regbuf_free(req->rl_recvbuf);
	rpcrdma_regbuf_free(req->rl_sendbuf);
	rpcrdma_regbuf_free(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);

		/* Ensure MW is not on any rl_registered list */
		if (!list_empty(&mr->mr_list))
			list_del(&mr->mr_list);

		frwr_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	if (buf->rb_completion_wq) {
		destroy_workqueue(buf->rb_completion_wq);
		buf->rb_completion_wq = NULL;
	}

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = list_first_entry(&buf->rb_recv_bufs,
				       struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		rpcrdma_rep_destroy(rep);
	}

	while (!list_empty(&buf->rb_send_bufs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
		list_del(&req->rl_list);
		rpcrdma_req_destroy(req);
	}

	rpcrdma_mrs_destroy(buf);
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	trace_xprtrdma_nomrs(r_xprt);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @req: object to return
 *
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		if (!rep->rr_temp) {
			list_add(&rep->rr_list, &buffers->rb_recv_bufs);
			rep = NULL;
		}
	}
	spin_unlock(&buffers->rb_lock);
	if (rep)
		rpcrdma_rep_destroy(rep);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	if (!rep->rr_temp) {
		spin_lock(&buffers->rb_lock);
		list_add(&rep->rr_list, &buffers->rb_recv_bufs);
		spin_unlock(&buffers->rb_lock);
	} else {
		rpcrdma_rep_destroy(rep);
	}
}

/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
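/* Allocation alone does not DMA-map a regbuf: rpcrdma_regbuf_alloc()
 * leaves rg_device NULL, and the buffer is mapped later by
 * __rpcrdma_regbuf_dma_map() once a device is available.
 */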
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb), flags);
	if (!rb)
		return NULL;
	rb->rg_data = kmalloc(size, flags);
	if (!rb->rg_data) {
		kfree(rb);
		return NULL;
	}

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;
	return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
	void *buf;

	buf = kmalloc(size, flags);
	if (!buf)
		return false;

	rpcrdma_regbuf_dma_unmap(rb);
	kfree(rb->rg_data);

	rb->rg_data = buf;
	rb->rg_iov.length = size;
	return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = r_xprt->rx_ia.ri_id->device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
					    rdmab_length(rb), rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
		return false;
	}

	rb->rg_device = device;
	rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey;
	return true;
}

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
			    rb->rg_direction);
	rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
	rpcrdma_regbuf_dma_unmap(rb);
	if (rb)
		kfree(rb->rg_data);
	kfree(rb);
}

/**
 * rpcrdma_ep_post - Post WRs to a transport's Send Queue
 * @ia: transport's device information
 * @ep: transport's RDMA endpoint information
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	int rc;

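	/* Completions are requested only for every rep_send_batch-th
	 * Send, or when the request carries TX resources that must be
	 * released by the completion handler (RPCRDMA_REQ_F_TX_RESOURCES).
	 * rpcrdma_sendctx_put_locked() then releases every sendctx up to
	 * and including the signaled one.
	 */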
	if (!ep->rep_send_count ||
	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = frwr_send(ia, req);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}

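/* Ensure the transport has enough Receive WRs posted to cover the
 * current credit grant plus backchannel slots, topping up whatever
 * ep->rep_receive_count says is already posted. Fresh rpcrdma_rep
 * objects are created on demand when the free list runs dry; reps
 * created with @temp set are released rather than recycled when
 * they are eventually put back.
 */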
static void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct ib_recv_wr *wr, *bad_wr;
	int needed, count, rc;

	rc = 0;
	count = 0;
	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (ep->rep_receive_count > needed)
		goto out;
	needed -= ep->rep_receive_count;
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

	count = 0;
	wr = NULL;
	while (needed) {
		struct rpcrdma_regbuf *rb;
		struct rpcrdma_rep *rep;

		spin_lock(&buf->rb_lock);
		rep = list_first_entry_or_null(&buf->rb_recv_bufs,
					       struct rpcrdma_rep, rr_list);
		if (likely(rep))
			list_del(&rep->rr_list);
		spin_unlock(&buf->rb_lock);
		if (!rep) {
			if (!rpcrdma_rep_create(r_xprt, temp))
				break;
			continue;
		}

		rb = rep->rr_rdmabuf;
		if (!rpcrdma_regbuf_dma_map(r_xprt, rb)) {
			rpcrdma_recv_buffer_put(rep);
			break;
		}

		trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		++count;
		--needed;
	}
	if (!count)
		goto out;

	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
	if (rc) {
		for (wr = bad_wr; wr; wr = wr->next) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	ep->rep_receive_count += count;
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
}