/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */

/*
 * Handle replies in tasklet context, using a single, global list.
 * The tasklet function simply walks the list and calls the reply
 * handler for each queued rpcrdma_rep.
 */

static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);

static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	unsigned long flags;

	data = data;	/* "data" is unused; self-assignment avoids a warning */
	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		rpcrdma_reply_handler(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

static void
rpcrdma_schedule_tasklet(struct list_head *sched_list)
{
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	list_splice_tail(sched_list, &rpcrdma_tasklets_g);
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
	tasklet_schedule(&rpcrdma_tasklet_g);
}

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC:       %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
		event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

static void
rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC:       %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
		event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
		if (wc->status != IB_WC_SUCCESS &&
		    wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("RPC:       %s: SEND: %s\n",
			       __func__, ib_wc_status_msg(wc->status));
	} else {
		struct rpcrdma_mw *r;

		r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
		r->mw_sendcompletion(wc);
	}
}

static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}

/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}

static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);
	prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
	list_add_tail(&rep->rr_list, sched_list);
	return;
out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("RPC:       %s: rep %p: %s\n",
		       __func__, rep, ib_wc_status_msg(wc->status));
	rep->rr_len = ~0U;
	goto out_schedule;
}

static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct list_head sched_list;
	struct ib_wc *wcs;
	int budget, count, rc;

	INIT_LIST_HEAD(&sched_list);
	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			goto out_schedule;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++, &sched_list);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	rc = 0;

out_schedule:
	rpcrdma_schedule_tasklet(&sched_list);
	return rc;
}

/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}

static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;
	LIST_HEAD(sched_list);

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_recvcq_process_wc(&wc, &sched_list);
	if (!list_empty(&sched_list))
		rpcrdma_schedule_tasklet(&sched_list);
	while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
		rpcrdma_sendcq_process_wc(&wc);
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %sconnected\n",
					__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC:       %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}

/*
 * Exported functions.
 */
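
/*
 * Call-order sketch (illustrative only; this is not code from this
 * file). The RPC/RDMA transport layer is expected to drive the
 * exported entry points roughly in this order when bringing up a
 * connection and sending a request:
 *
 *	rpcrdma_ia_open(xprt, addr, memreg);     adapter, PD, memreg ops
 *	rpcrdma_ep_create(ep, ia, cdata);        QP attributes, send/recv CQs
 *	rpcrdma_buffer_create(xprt);             req/rep buffer pools
 *	rpcrdma_ep_connect(ep, ia);              rdma_connect, wait for up
 *	...
 *	req = rpcrdma_buffer_get(&xprt->rx_buf);
 *	rpcrdma_ep_post(ia, ep, req);            posts the recv, then the send
 */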

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_device, devattr);
	if (rc) {
		dprintk("RPC:       %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out3;
	}

	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
	}

	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if (((devattr->device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
		      (devattr->max_fast_reg_page_list_len == 0)) {
			dprintk("RPC:       %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_device->alloc_fmr) {
			dprintk("RPC:       %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_ALLPHYSICAL;
		}
	}

	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_FRMR:
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
		break;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_REMOTE_READ;
		goto register_setup;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
	register_setup:
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			printk(KERN_ALERT "%s: ib_get_dma_mr for "
				"phys register failed with %lX\n",
				__func__, PTR_ERR(ia->ri_bind_mem));
			rc = -ENOMEM;
			goto out3;
		}
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC:       %s: memory registration strategy is '%s'\n",
		__func__, ia->ri_ops->ro_displayname);

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 *   o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_bind_mem != NULL) {
		rc = ib_dereg_mr(ia->ri_bind_mem);
		dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}

	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		WARN_ON(ib_dealloc_pd(ia->ri_pd));
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	struct ib_cq *sendcq, *recvcq;
	struct ib_cq_init_attr cq_attr = {};
	int rc, err;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr->max_qp_wr)
		cdata->max_requests = devattr->max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	if (cdata->padding) {
		ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding,
						      GFP_KERNEL);
		if (IS_ERR(ep->rep_padbuf))
			return PTR_ERR(ep->rep_padbuf);
	} else
		ep->rep_padbuf = NULL;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
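	/* Worked example (illustrative; the numbers are assumptions, not
	 * taken from this file): with max_send_wr of 64, rep_cqinit starts
	 * at 64/2 - 1 = 31 and is then clamped to
	 * RPCRDMA_MAX_UNSIGNALED_SENDS if that is smaller. rpcrdma_ep_post()
	 * decrements this count once per SEND and requests a signaled
	 * completion only when it reaches zero, so the provider's send CQ
	 * is reaped before it can wrap and lose a completion.
	 */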
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
		ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
	else if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;
	INIT_CQCOUNT(ep);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
	sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep, &cq_attr);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
	recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep, &cq_attr);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr->max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						devattr->max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	rpcrdma_free_regbuf(ia, ep->rep_padbuf);
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	rpcrdma_free_regbuf(ia, ep->rep_padbuf);

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC:       %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rdma_destroy_id(old);
	} else {
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
				__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone a best-effort connection attempt.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC:       %s: non-peer_reject, retry\n", __func__);
		goto retry;
	}
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use the same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
				ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
			goto retry;
		}
		rc = ep->rep_connected;
	} else {
		dprintk("RPC:       %s: connected\n", __func__);
	}

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
							ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}

static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}

static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_rxprt = r_xprt;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	char *p;
	size_t len;
	int i, rc;

	buf->rb_max_requests = cdata->max_requests;
	spin_lock_init(&buf->rb_lock);

	/* Need to allocate:
	 *   1.  arrays for send and recv pointers
	 *   2.  arrays of struct rpcrdma_req to fill in pointers
	 *   3.  array of struct rpcrdma_rep for replies
	 * Send/recv buffers in req/rep need to be registered
	 */
	len = buf->rb_max_requests *
		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));

	p = kzalloc(len, GFP_KERNEL);
	if (p == NULL) {
		dprintk("RPC:       %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
			__func__, len);
		rc = -ENOMEM;
		goto out;
	}
	buf->rb_pool = p;	/* for freeing it later */

	buf->rb_send_bufs = (struct rpcrdma_req **) p;
	p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];

	rc = ia->ri_ops->ro_init(r_xprt);
	if (rc)
		goto out;

	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;
		struct rpcrdma_rep *rep;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		buf->rb_send_bufs[i] = req;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC:       %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		buf->rb_recv_bufs[i] = rep;
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
	if (!rep)
		return;

	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
	kfree(rep);
}

static void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	if (!req)
		return;

	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	int i;

	/* clean up in reverse order from create
	 *   1.  recv mr memory (mr free, then kfree)
	 *   2.  send mr memory (mr free, then kfree)
	 *   3.  MWs
	 */
	dprintk("RPC:       %s: entering\n", __func__);

	for (i = 0; i < buf->rb_max_requests; i++) {
		if (buf->rb_recv_bufs)
			rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
		if (buf->rb_send_bufs)
			rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
	}

	ia->ri_ops->ro_destroy(buf);

	kfree(buf->rb_pool);
}

struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws)) {
		mw = list_first_entry(&buf->rb_mws,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		pr_err("RPC:       %s: no MWs available\n", __func__);
	return mw;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_mwlock);
	list_add_tail(&mw->mw_list, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);
}

static void
rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	buf->rb_send_bufs[--buf->rb_send_index] = req;
	req->rl_niovs = 0;
	if (req->rl_reply) {
		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
		req->rl_reply = NULL;
	}
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 * Rule:
 *    rb_send_index and rb_recv_index MUST always be pointing to the
 *    *next* available buffer (non-NULL). They are incremented after
 *    removing buffers, and decremented *before* returning them.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);

	if (buffers->rb_send_index == buffers->rb_max_requests) {
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
		dprintk("RPC:       %s: out of request buffers\n", __func__);
		return ((struct rpcrdma_req *)NULL);
	}

	req = buffers->rb_send_bufs[buffers->rb_send_index];
	if (buffers->rb_send_index < buffers->rb_recv_index) {
		dprintk("RPC:       %s: %d extra receives outstanding (ok)\n",
			__func__,
			buffers->rb_recv_index - buffers->rb_send_index);
		req->rl_reply = NULL;
	} else {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;

	spin_unlock_irqrestore(&buffers->rb_lock, flags);
	return req;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	rpcrdma_buffer_put_sendbuf(req, buffers);
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_recv_index < buffers->rb_max_requests) {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

void
rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
{
	dprintk("RPC:       map_one: offset %p iova %llx len %zu\n",
		seg->mr_offset,
		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
}

static int
rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
				struct ib_mr **mrp, struct ib_sge *iov)
{
	struct ib_phys_buf ipb;
	struct ib_mr *mr;
	int rc;

	/*
	 * All memory passed here was kmalloc'ed, therefore phys-contiguous.
	 */
	iov->addr = ib_dma_map_single(ia->ri_device,
			va, len, DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_device, iov->addr))
		return -ENOMEM;

	iov->length = len;

	if (ia->ri_have_dma_lkey) {
		*mrp = NULL;
		iov->lkey = ia->ri_dma_lkey;
		return 0;
	} else if (ia->ri_bind_mem != NULL) {
		*mrp = NULL;
		iov->lkey = ia->ri_bind_mem->lkey;
		return 0;
	}

	ipb.addr = iov->addr;
	ipb.size = iov->length;
	mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
			IB_ACCESS_LOCAL_WRITE, &iov->addr);

	dprintk("RPC:       %s: phys convert: 0x%llx "
			"registered 0x%llx length %d\n",
			__func__, (unsigned long long)ipb.addr,
			(unsigned long long)iov->addr, len);

	if (IS_ERR(mr)) {
		*mrp = NULL;
		rc = PTR_ERR(mr);
		dprintk("RPC:       %s: failed with %i\n", __func__, rc);
	} else {
		*mrp = mr;
		iov->lkey = mr->lkey;
		rc = 0;
	}

	return rc;
}

static int
rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
				struct ib_mr *mr, struct ib_sge *iov)
{
	int rc;

	ib_dma_unmap_single(ia->ri_device,
			    iov->addr, iov->length, DMA_BIDIRECTIONAL);

	if (NULL == mr)
		return 0;

	rc = ib_dereg_mr(mr);
	if (rc)
		dprintk("RPC:       %s: ib_dereg_mr failed %i\n", __func__, rc);
	return rc;
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	int rc;

	rc = -ENOMEM;
	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		goto out;

	rb->rg_size = size;
	rb->rg_owner = NULL;
	rc = rpcrdma_register_internal(ia, rb->rg_base, size,
				       &rb->rg_mr, &rb->rg_iov);
	if (rc)
		goto out_free;

	return rb;

out_free:
	kfree(rb);
out:
	return ERR_PTR(rc);
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (rb) {
		rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
		kfree(rb);
	}
}
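
/*
 * Usage sketch (illustrative only; the 1024-byte size and the error
 * handling are assumptions, not code from this file):
 *
 *	struct rpcrdma_regbuf *rb;
 *
 *	rb = rpcrdma_alloc_regbuf(ia, 1024, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	. . . fill rb->rg_base, post it via an ib_sge built from rb->rg_iov . . .
 *	rpcrdma_free_regbuf(ia, rb);
 */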

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	int rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
	send_wr.sg_list = req->rl_send_iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;
	if (send_wr.num_sge == 4)	/* no need to sync any pad (constant) */
		ib_dma_sync_single_for_device(ia->ri_device,
					      req->rl_send_iov[3].addr,
					      req->rl_send_iov[3].length,
					      DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_device,
				      req->rl_send_iov[1].addr,
				      req->rl_send_iov[1].length,
				      DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_device,
				      req->rl_send_iov[0].addr,
				      req->rl_send_iov[0].length,
				      DMA_TO_DEVICE);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else { /* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC:       %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}

/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_id = (u64) (unsigned long) rep;
	recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rdmab_length(rep->rr_rdmabuf),
				   DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);

	if (rc)
		dprintk("RPC:       %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}

/* How many chunk list items fit within our inline buffers?
 */
unsigned int
rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	int bytes, segments;

	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
	bytes -= RPCRDMA_HDRLEN_MIN;
	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
		pr_warn("RPC:       %s: inline threshold too small\n",
			__func__);
		return 0;
	}

	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
	dprintk("RPC:       %s: max chunk list size = %d segments\n",
		__func__, segments);
	return segments;
}
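
/*
 * Worked example for rpcrdma_max_segments() above (illustrative; the
 * sizes are assumptions): with 1024-byte inline send and receive
 * thresholds, a 28-byte minimum RPC-over-RDMA header, and 16-byte
 * chunk segments, bytes = 1024 - 28 = 996, which holds 62 segments;
 * rounding down to a power of two yields a maximum chunk list size
 * of 32 segments.
 */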