/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
					   struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_bc_ops = {
	.xpo_create = svc_rdma_bc_create,
	.xpo_detach = svc_rdma_bc_detach,
	.xpo_free = svc_rdma_bc_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
	.xcl_name = "rdma-bc",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_bc_ops,
	.xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
					   struct net *net,
					   struct sockaddr *sa, int salen,
					   int flags)
{
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;

	cma_xprt = rdma_create_xprt(serv, 0);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
	set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
	serv->sv_bc_xprt = xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	dprintk("svcrdma: %s(%p)\n", __func__, xprt);
	if (xprt)
		kfree(rdma);
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
					   gfp_t flags)
{
	struct svc_rdma_op_ctxt *ctxt;

	ctxt = kmalloc(sizeof(*ctxt), flags);
	if (ctxt) {
		ctxt->xprt = xprt;
		INIT_LIST_HEAD(&ctxt->list);
	}
	return ctxt;
}

static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
{
	unsigned int i;

	/* Each RPC/RDMA credit can consume a number of send
	 * and receive WQEs. One ctxt is allocated for each.
	 */
	i = xprt->sc_sq_depth + xprt->sc_rq_depth;

	while (i--) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = alloc_ctxt(xprt, GFP_KERNEL);
		if (!ctxt) {
			dprintk("svcrdma: No memory for RDMA ctxt\n");
			return false;
		}
		list_add(&ctxt->list, &xprt->sc_ctxts);
	}
	return true;
}

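/*
 * svc_rdma_get_context - take an op context off the transport's free list.
 *
 * If the pre-allocated list is unexpectedly empty, fall back to a fresh
 * GFP_NOIO allocation so the caller can still make progress.
 */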
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used++;
	if (list_empty(&xprt->sc_ctxts))
		goto out_empty;

	ctxt = list_first_entry(&xprt->sc_ctxts,
				struct svc_rdma_op_ctxt, list);
	list_del(&ctxt->list);
	spin_unlock(&xprt->sc_ctxt_lock);

out:
	ctxt->count = 0;
	ctxt->mapped_sges = 0;
	return ctxt;

out_empty:
	/* Either pre-allocation missed the mark, or send
	 * queue accounting is broken.
	 */
	spin_unlock(&xprt->sc_ctxt_lock);

	ctxt = alloc_ctxt(xprt, GFP_NOIO);
	if (ctxt)
		goto out;

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	spin_unlock(&xprt->sc_ctxt_lock);
	WARN_ONCE(1, "svcrdma: empty RDMA ctxt list?\n");
	return NULL;
}

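/*
 * svc_rdma_unmap_dma - DMA-unmap every SGE that was mapped for this
 * context and reset the mapped-SGE count.
 */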
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	struct ib_device *device = xprt->sc_cm_id->device;
	unsigned int i;

	for (i = 0; i < ctxt->mapped_sges; i++)
		ib_dma_unmap_page(device,
				  ctxt->sge[i].addr,
				  ctxt->sge[i].length,
				  ctxt->direction);
	ctxt->mapped_sges = 0;
}

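/*
 * svc_rdma_put_context - return an op context to the transport's free list,
 * optionally releasing the pages it references.
 */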
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	spin_lock(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	list_add(&ctxt->list, &xprt->sc_ctxts);
	spin_unlock(&xprt->sc_ctxt_lock);
}

static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_ctxts)) {
		struct svc_rdma_op_ctxt *ctxt;

		ctxt = list_first_entry(&xprt->sc_ctxts,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		kfree(ctxt);
	}
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
			"closing transport\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *xprt = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	/* All wc fields are now known to be valid */
	ctxt->byte_len = wc->byte_len;
	spin_lock(&xprt->sc_rq_dto_lock);
	list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
	spin_unlock(&xprt->sc_rq_dto_lock);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		goto out;
	svc_xprt_enqueue(&xprt->sc_xprt);
	goto out;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_warn("svcrdma: receive: %s (%u/0x%x)\n",
			ib_wc_status_msg(wc->status),
			wc->status, wc->vendor_err);
	set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
	svc_rdma_put_context(ctxt, 1);

out:
	svc_xprt_put(&xprt->sc_xprt);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *xprt = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_op_ctxt *ctxt;

	atomic_inc(&xprt->sc_sq_avail);
	wake_up(&xprt->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: Send: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_xprt_put(&xprt->sc_xprt);
}

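/*
 * Allocate an svcxprt_rdma and initialize its lists, locks, and wait queue.
 * @listener marks the transport as a listening endpoint.
 */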
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_ctxt_lock);
	spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

	/*
	 * Note that this implies that the underlying transport support
	 * has some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
	 */
	set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

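/*
 * svc_rdma_post_recv - build a Receive context from freshly allocated,
 * DMA-mapped pages and post it to the Receive Queue.
 *
 * Returns zero on success, -ENOMEM if allocation or DMA mapping fails,
 * otherwise the status returned by ib_post_recv().
 */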
int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->cqe.done = svc_rdma_wc_receive;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = alloc_page(flags);
		if (!page)
			goto err_put_ctxt;
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		svc_rdma_count_mappings(xprt, ctxt);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_cqe = &ctxt->cqe;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

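/*
 * svc_rdma_repost_recv - replenish one Receive WR; on failure, mark the
 * transport for closing and return -ENOTCONN.
 */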
int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
	int ret = 0;

	ret = svc_rdma_post_recv(xprt, flags);
	if (ret) {
		pr_err("svcrdma: could not post a receive buffer, err=%d.\n",
		       ret);
		pr_err("svcrdma: closing transport %p.\n", xprt);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		ret = -ENOTCONN;
	}
	return ret;
}

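/*
 * Examine the RPC-over-RDMA private message, if the client provided one,
 * to learn whether the peer can accept Send With Invalidate.
 */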
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		newxprt->sc_snd_w_inv = pmsg->cp_flags &
					RPCRDMA_CMP_F_SND_W_INV_OK;

		dprintk("svcrdma: client send_size %u, recv_size %u "
			"remote inv %ssupported\n",
			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
			newxprt->sc_snd_w_inv ? "" : "un");
	}
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);
	svc_rdma_parse_connect_private(newxprt, param);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = param->initiator_depth;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will be
 * either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id, &event->param.conn);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(&init_net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret) {
		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
		goto err1;
	}
#endif
	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	struct ib_device *dev;
	struct sockaddr *sap;
	unsigned int i;
	int ret = 0;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;
	newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
					 svcrdma_max_requests);
	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
	newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
					    svcrdma_max_bc_requests);
	newxprt->sc_rq_depth = newxprt->sc_max_requests +
			       newxprt->sc_max_bc_requests;
	newxprt->sc_sq_depth = newxprt->sc_rq_depth;
	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

	if (!svc_rdma_prealloc_ctxts(newxprt))
		goto errout;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.port_num = newxprt->sc_cm_id->port_num;
	qp_attr.cap.max_rdma_ctxs = newxprt->sc_max_requests;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
		newxprt->sc_cm_id, newxprt->sc_pd);
	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
	} else
		newxprt->sc_snd_w_inv = false;

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
		goto errout;

	if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/* Construct RDMA-CM private message */
	pmsg.cp_magic = rpcrdma_cmp_magic;
	pmsg.cp_version = RPCRDMA_CMP_VERSION;
	pmsg.cp_flags = 0;
	pmsg.cp_send_size = pmsg.cp_recv_size =
		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	conn_param.private_data = &pmsg;
	conn_param.private_data_len = sizeof(pmsg);
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
		       ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
	dprintk("    max_sge         : %d\n", newxprt->sc_max_sge);
	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
	dprintk("    ord             : %d\n", newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

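/*
 * Work handler that tears down an svcxprt_rdma: drain the QP, release any
 * contexts still queued for receive or read completion, then destroy the
 * QP, CQs, PD, and CM ID before freeing the structure itself.
 */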
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	dprintk("svcrdma: %s(%p)\n", __func__, rdma);

	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_drain_qp(rdma->sc_qp);

	/* We should only be called from kref_put */
	if (kref_read(&xprt->xpt_ref) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       kref_read(&xprt->xpt_ref));

	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_first_entry(&rdma->sc_read_complete_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		svc_rdma_put_context(ctxt, 1);
	}
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_first_entry(&rdma->sc_rq_dto_q,
					struct svc_rdma_op_ctxt, list);
		list_del(&ctxt->list);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (rdma->sc_ctxt_used != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       rdma->sc_ctxt_used);

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	svc_rdma_destroy_rw_ctxts(rdma);
	svc_rdma_destroy_ctxts(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_free_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_free_cq(rdma->sc_rq_cq);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

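/*
 * xpo_free callback: defer the actual teardown to __svc_rdma_free() on
 * svc_rdma_wq.
 */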
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

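/*
 * xpo_has_wspace callback: report no write space while other senders are
 * already waiting for Send Queue entries.
 */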
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}

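/*
 * svc_rdma_send - post a chain of Send Queue WRs, waiting whenever the SQ
 * is full. A transport reference is taken for each WR posted; the
 * references are dropped again here if the post fails.
 */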
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_sub_return(wr_count, &xprt->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);

			/* Wait until SQ WR available if SQ still full */
			atomic_add(wr_count, &xprt->sc_sq_avail);
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_avail) > wr_count);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			for (i = 0; i < wr_count; i ++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d\n", ret);
			dprintk("    sc_sq_avail=%d, sc_sq_depth=%d\n",
				atomic_read(&xprt->sc_sq_avail),
				xprt->sc_sq_depth);
			wake_up(&xprt->sc_send_wait);
		}
		break;
	}
	return ret;
}