// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/nvme-rdma.h>

#include "nvme.h"
#include "fabrics.h"


#define NVME_RDMA_CONNECT_TIMEOUT_MS	3000		/* 3 seconds */

#define NVME_RDMA_MAX_SEGMENTS		256

#define NVME_RDMA_MAX_INLINE_SEGMENTS	4

#define NVME_RDMA_DATA_SGL_SIZE \
	(sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
#define NVME_RDMA_METADATA_SGL_SIZE \
	(sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)

struct nvme_rdma_device {
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct kref		ref;
	struct list_head	entry;
	unsigned int		num_inline_segments;
};

struct nvme_rdma_qe {
	struct ib_cqe		cqe;
	void			*data;
	u64			dma;
};

struct nvme_rdma_sgl {
	int			nents;
	struct sg_table		sg_table;
};

struct nvme_rdma_queue;
struct nvme_rdma_request {
	struct nvme_request	req;
	struct ib_mr		*mr;
	struct nvme_rdma_qe	sqe;
	union nvme_result	result;
	__le16			status;
	refcount_t		ref;
	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
	u32			num_sge;
	struct ib_reg_wr	reg_wr;
	struct ib_cqe		reg_cqe;
	struct nvme_rdma_queue  *queue;
	struct nvme_rdma_sgl	data_sgl;
	struct nvme_rdma_sgl	*metadata_sgl;
	bool			use_sig_mr;
};

enum nvme_rdma_queue_flags {
	NVME_RDMA_Q_ALLOCATED		= 0,
	NVME_RDMA_Q_LIVE		= 1,
	NVME_RDMA_Q_TR_READY		= 2,
};

struct nvme_rdma_queue {
	struct nvme_rdma_qe	*rsp_ring;
	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_rdma_ctrl	*ctrl;
	struct nvme_rdma_device	*device;
	struct ib_cq		*ib_cq;
	struct ib_qp		*qp;

	unsigned long		flags;
	struct rdma_cm_id	*cm_id;
	int			cm_error;
	struct completion	cm_done;
	bool			pi_support;
	int			cq_size;
};

struct nvme_rdma_ctrl {
	/* read only in the hot path */
	struct nvme_rdma_queue	*queues;

	/* other member variables */
	struct blk_mq_tag_set	tag_set;
	struct work_struct	err_work;

	struct nvme_rdma_qe	async_event_sqe;

	struct delayed_work	reconnect_work;

	struct list_head	list;

	struct blk_mq_tag_set	admin_tag_set;
	struct nvme_rdma_device	*device;

	u32			max_fr_pages;

	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;

	struct nvme_ctrl	ctrl;
	struct mutex		teardown_lock;
	bool			use_inline_data;
	u32			io_queues[HCTX_MAX_TYPES];
};

static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);

/*
 * Disabling this option makes small I/O go faster, but is fundamentally
 * unsafe.  With it turned off we will have to register a global rkey that
 * allows read and write access to all physical memory.
 */
static bool register_always = true;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
	 "Use memory registration even for contiguous memory regions");

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvme_rdma_complete_rq(struct request *rq);

static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;

static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
	return queue - queue->ctrl->queues;
}

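/*
 * Poll queues are laid out after the default and read queue sets, so a
 * queue is a poll queue if its index falls beyond both of those counts.
 */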
static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
{
	return nvme_rdma_queue_idx(queue) >
		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
		queue->ctrl->io_queues[HCTX_TYPE_READ];
}

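/*
 * In-capsule (inline) data capacity is whatever the command capsule leaves
 * after the SQE itself.
 */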
static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
	kfree(qe->data);
}

static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	qe->data = kzalloc(capsule_size, GFP_KERNEL);
	if (!qe->data)
		return -ENOMEM;

	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
	if (ib_dma_mapping_error(ibdev, qe->dma)) {
		kfree(qe->data);
		qe->data = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_rdma_free_ring(struct ib_device *ibdev,
		struct nvme_rdma_qe *ring, size_t ib_queue_size,
		size_t capsule_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ib_queue_size; i++)
		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
	kfree(ring);
}

static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
		size_t ib_queue_size, size_t capsule_size,
		enum dma_data_direction dir)
{
	struct nvme_rdma_qe *ring;
	int i;

	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
	if (!ring)
		return NULL;

	/*
	 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
	 * lifetime. It's safe, since any change in the underlying RDMA device
	 * will issue error recovery and queue re-creation.
	 */
	for (i = 0; i < ib_queue_size; i++) {
		if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
			goto out_free_ring;
	}

	return ring;

out_free_ring:
	nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
	return NULL;
}

static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);

}

static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
	int ret;

	ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
			msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	WARN_ON_ONCE(queue->cm_error > 0);
	return queue->cm_error;
}

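/*
 * Size the send queue for "factor" work requests per command slot, plus one
 * extra WR on each queue that is reserved for draining.
 */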
static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
{
	struct nvme_rdma_device *dev = queue->device;
	struct ib_qp_init_attr init_attr;
	int ret;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.event_handler = nvme_rdma_qp_event;
	/* +1 for drain */
	init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
	/* +1 for drain */
	init_attr.cap.max_recv_wr = queue->queue_size + 1;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.send_cq = queue->ib_cq;
	init_attr.recv_cq = queue->ib_cq;
	if (queue->pi_support)
		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
	init_attr.qp_context = queue;

	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);

	queue->qp = queue->cm_id->qp;
	return ret;
}

static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	kfree(req->sqe.data);
}

static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];

	nvme_req(rq)->ctrl = &ctrl->ctrl;
	req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
	if (!req->sqe.data)
		return -ENOMEM;

	/* metadata nvme_rdma_sgl struct is located after command's data SGL */
	if (queue->pi_support)
		req->metadata_sgl = (void *)nvme_req(rq) +
			sizeof(struct nvme_rdma_request) +
			NVME_RDMA_DATA_SGL_SIZE;

	req->queue = queue;

	return 0;
}

static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_rdma_ctrl *ctrl = data;
	struct nvme_rdma_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static void nvme_rdma_free_dev(struct kref *ref)
{
	struct nvme_rdma_device *ndev =
		container_of(ref, struct nvme_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	ib_dealloc_pd(ndev->pd);
	kfree(ndev);
}

static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
{
	kref_put(&dev->ref, nvme_rdma_free_dev);
}

static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
{
	return kref_get_unless_zero(&dev->ref);
}

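/*
 * Look up (by node GUID) a cached device entry for this CM ID's ib_device
 * and take a reference, or allocate a new entry with its own PD.
 */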
static struct nvme_rdma_device *
nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvme_rdma_device *ndev;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->dev->node_guid == cm_id->device->node_guid &&
		    nvme_rdma_dev_get(ndev))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->dev = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->dev,
		register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (!(ndev->dev->attrs.device_cap_flags &
	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		dev_err(&ndev->dev->dev,
			"Memory registrations not supported.\n");
		goto out_free_pd;
	}

	ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
					ndev->dev->attrs.max_send_sge - 1);
	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
{
	if (nvme_rdma_poll_queue(queue))
		ib_free_cq(queue->ib_cq);
	else
		ib_cq_pool_put(queue->ib_cq, queue->cq_size);
}

static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_device *dev;
	struct ib_device *ibdev;

	if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
		return;

	dev = queue->device;
	ibdev = dev->dev;

	if (queue->pi_support)
		ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
	ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);

	/*
	 * The cm_id object might have been destroyed during RDMA connection
	 * establishment error flow to avoid getting other cma events, thus
	 * the destruction of the QP shouldn't use rdma_cm API.
	 */
	ib_destroy_qp(queue->qp);
	nvme_rdma_free_cq(queue);

	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
			sizeof(struct nvme_completion), DMA_FROM_DEVICE);

	nvme_rdma_dev_put(dev);
}

static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
{
	u32 max_page_list_len;

	if (pi_support)
		max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;

	return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
}

static int nvme_rdma_create_cq(struct ib_device *ibdev,
		struct nvme_rdma_queue *queue)
{
	int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
	enum ib_poll_context poll_ctx;

	/*
	 * Spread I/O queues completion vectors according to their queue index.
	 * Admin queues can always go on completion vector 0.
	 */
	comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;

	/* Polling queues need direct cq polling context */
	if (nvme_rdma_poll_queue(queue)) {
		poll_ctx = IB_POLL_DIRECT;
		queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
					   comp_vector, poll_ctx);
	} else {
		poll_ctx = IB_POLL_SOFTIRQ;
		queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
					      comp_vector, poll_ctx);
	}

	if (IS_ERR(queue->ib_cq)) {
		ret = PTR_ERR(queue->ib_cq);
		return ret;
	}

	return 0;
}

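/*
 * Allocate the per-queue RDMA resources: CQ, QP, response ring and MR pool
 * (plus an integrity MR pool when PI is supported).
 */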
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{
	struct ib_device *ibdev;
	const int send_wr_factor = 3;			/* MR, SEND, INV */
	const int cq_factor = send_wr_factor + 1;	/* + RECV */
	int ret, pages_per_mr;

	queue->device = nvme_rdma_find_get_device(queue->cm_id);
	if (!queue->device) {
		dev_err(queue->cm_id->device->dev.parent,
			"no client data found!\n");
		return -ECONNREFUSED;
	}
	ibdev = queue->device->dev;

	/* +1 for ib_stop_cq */
	queue->cq_size = cq_factor * queue->queue_size + 1;

	ret = nvme_rdma_create_cq(ibdev, queue);
	if (ret)
		goto out_put_dev;

	ret = nvme_rdma_create_qp(queue, send_wr_factor);
	if (ret)
		goto out_destroy_ib_cq;

	queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
			sizeof(struct nvme_completion), DMA_FROM_DEVICE);
	if (!queue->rsp_ring) {
		ret = -ENOMEM;
		goto out_destroy_qp;
	}

	/*
	 * Currently we don't use SG_GAPS MR's so if the first entry is
	 * misaligned we'll end up using two entries for a single data page,
	 * so one additional entry is required.
	 */
	pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
	ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
			      queue->queue_size,
			      IB_MR_TYPE_MEM_REG,
			      pages_per_mr, 0);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to initialize MR pool sized %d for QID %d\n",
			queue->queue_size, nvme_rdma_queue_idx(queue));
		goto out_destroy_ring;
	}

	if (queue->pi_support) {
		ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
				      queue->queue_size, IB_MR_TYPE_INTEGRITY,
				      pages_per_mr, pages_per_mr);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"failed to initialize PI MR pool sized %d for QID %d\n",
				queue->queue_size, nvme_rdma_queue_idx(queue));
			goto out_destroy_mr_pool;
		}
	}

	set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);

	return 0;

out_destroy_mr_pool:
	ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
out_destroy_ring:
	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
			    sizeof(struct nvme_completion), DMA_FROM_DEVICE);
out_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
	nvme_rdma_free_cq(queue);
out_put_dev:
	nvme_rdma_dev_put(queue->device);
	return ret;
}

static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
		int idx, size_t queue_size)
{
	struct nvme_rdma_queue *queue;
	struct sockaddr *src_addr = NULL;
	int ret;

	queue = &ctrl->queues[idx];
	queue->ctrl = ctrl;
	if (idx && ctrl->ctrl.max_integrity_segments)
		queue->pi_support = true;
	else
		queue->pi_support = false;
	init_completion(&queue->cm_done);

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	queue->queue_size = queue_size;

	queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(queue->cm_id)) {
		dev_info(ctrl->ctrl.device,
			"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
		return PTR_ERR(queue->cm_id);
	}

	if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
		src_addr = (struct sockaddr *)&ctrl->src_addr;

	queue->cm_error = -ETIMEDOUT;
	ret = rdma_resolve_addr(queue->cm_id, src_addr,
			(struct sockaddr *)&ctrl->addr,
			NVME_RDMA_CONNECT_TIMEOUT_MS);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"rdma_resolve_addr failed (%d).\n", ret);
		goto out_destroy_cm_id;
	}

	ret = nvme_rdma_wait_for_cm(queue);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"rdma connection establishment failed (%d)\n", ret);
		goto out_destroy_cm_id;
	}

	set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);

	return 0;

out_destroy_cm_id:
	rdma_destroy_id(queue->cm_id);
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
	rdma_disconnect(queue->cm_id);
	ib_drain_qp(queue->qp);
}

static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
	if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
		return;
	__nvme_rdma_stop_queue(queue);
}

static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
{
	if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
		return;

	nvme_rdma_destroy_queue_ib(queue);
	rdma_destroy_id(queue->cm_id);
}

static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_rdma_free_queue(&ctrl->queues[i]);
}

static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_rdma_stop_queue(&ctrl->queues[i]);
}

static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
{
	struct nvme_rdma_queue *queue = &ctrl->queues[idx];
	bool poll = nvme_rdma_poll_queue(queue);
	int ret;

	if (idx)
		ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
	else
		ret = nvmf_connect_admin_queue(&ctrl->ctrl);

	if (!ret) {
		set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
	} else {
		if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
			__nvme_rdma_stop_queue(queue);
		dev_info(ctrl->ctrl.device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	}
	return ret;
}

static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_rdma_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_rdma_stop_queue(&ctrl->queues[i]);
	return ret;
}

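/*
 * Negotiate the I/O queue count with the controller and distribute the
 * queues between the default (write), read and poll sets according to the
 * requested nr_write_queues/nr_poll_queues options.
 */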
static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	struct ib_device *ibdev = ctrl->device->dev;
	unsigned int nr_io_queues, nr_default_queues;
	unsigned int nr_read_queues, nr_poll_queues;
	int i, ret;

	nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
				min(opts->nr_io_queues, num_online_cpus()));
	nr_default_queues =  min_t(unsigned int, ibdev->num_comp_vectors,
				min(opts->nr_write_queues, num_online_cpus()));
	nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
	nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;

	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret)
		return ret;

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (ctrl->ctrl.queue_count < 2)
		return 0;

	dev_info(ctrl->ctrl.device,
		"creating %d I/O queues.\n", nr_io_queues);

	if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
		ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(nr_default_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(nr_read_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	}

	if (opts->nr_poll_queues && nr_io_queues) {
		/* map dedicated poll queues only if we have queues left */
		ctrl->io_queues[HCTX_TYPE_POLL] =
			min(nr_poll_queues, nr_io_queues);
	}

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_rdma_alloc_queue(ctrl, i,
				ctrl->ctrl.sqsize + 1);
		if (ret)
			goto out_free_queues;
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_rdma_free_queue(&ctrl->queues[i]);

	return ret;
}

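/* Build and allocate the blk-mq tag set for either the admin or I/O queues. */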
static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
		bool admin)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
	struct blk_mq_tag_set *set;
	int ret;

	if (admin) {
		set = &ctrl->admin_tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_rdma_admin_mq_ops;
		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		set->reserved_tags = 2; /* connect + keep-alive */
		set->numa_node = nctrl->numa_node;
		set->cmd_size = sizeof(struct nvme_rdma_request) +
				NVME_RDMA_DATA_SGL_SIZE;
		set->driver_data = ctrl;
		set->nr_hw_queues = 1;
		set->timeout = ADMIN_TIMEOUT;
		set->flags = BLK_MQ_F_NO_SCHED;
	} else {
		set = &ctrl->tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_rdma_mq_ops;
		set->queue_depth = nctrl->sqsize + 1;
		set->reserved_tags = 1; /* fabric connect */
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_SHOULD_MERGE;
		set->cmd_size = sizeof(struct nvme_rdma_request) +
				NVME_RDMA_DATA_SGL_SIZE;
		if (nctrl->max_integrity_segments)
			set->cmd_size += sizeof(struct nvme_rdma_sgl) +
					 NVME_RDMA_METADATA_SGL_SIZE;
		set->driver_data = ctrl;
		set->nr_hw_queues = nctrl->queue_count - 1;
		set->timeout = NVME_IO_TIMEOUT;
		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return set;
}

static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool remove)
{
	if (remove) {
		blk_cleanup_queue(ctrl->ctrl.admin_q);
		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
	}
	if (ctrl->async_event_sqe.data) {
		cancel_work_sync(&ctrl->ctrl.async_event_work);
		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
				sizeof(struct nvme_command), DMA_TO_DEVICE);
		ctrl->async_event_sqe.data = NULL;
	}
	nvme_rdma_free_queue(&ctrl->queues[0]);
}

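/*
 * Set up the admin queue: allocate and connect queue 0, discover device
 * capabilities (T10-PI, max MR pages), enable the controller and identify it.
 */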
static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool new)
{
	bool pi_capable = false;
	int error;

	error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (error)
		return error;

	ctrl->device = ctrl->queues[0].device;
	ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);

	/* T10-PI support */
	if (ctrl->device->dev->attrs.device_cap_flags &
	    IB_DEVICE_INTEGRITY_HANDOVER)
		pi_capable = true;

	ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
							pi_capable);

	/*
	 * Bind the async event SQE DMA mapping to the admin queue lifetime.
	 * It's safe, since any change in the underlying RDMA device will issue
	 * error recovery and queue re-creation.
	 */
	error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
			sizeof(struct nvme_command), DMA_TO_DEVICE);
	if (error)
		goto out_free_queue;

	if (new) {
		ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
		if (IS_ERR(ctrl->ctrl.admin_tagset)) {
			error = PTR_ERR(ctrl->ctrl.admin_tagset);
			goto out_free_async_qe;
		}

		ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
		if (IS_ERR(ctrl->ctrl.fabrics_q)) {
			error = PTR_ERR(ctrl->ctrl.fabrics_q);
			goto out_free_tagset;
		}

		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
		if (IS_ERR(ctrl->ctrl.admin_q)) {
			error = PTR_ERR(ctrl->ctrl.admin_q);
			goto out_cleanup_fabrics_q;
		}
	}

	error = nvme_rdma_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_stop_queue;

	ctrl->ctrl.max_segments = ctrl->max_fr_pages;
	ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
	if (pi_capable)
		ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
	else
		ctrl->ctrl.max_integrity_segments = 0;

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_stop_queue;

	return 0;

out_stop_queue:
	nvme_rdma_stop_queue(&ctrl->queues[0]);
out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	if (new)
		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
out_free_async_qe:
	if (ctrl->async_event_sqe.data) {
		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
			sizeof(struct nvme_command), DMA_TO_DEVICE);
		ctrl->async_event_sqe.data = NULL;
	}
out_free_queue:
	nvme_rdma_free_queue(&ctrl->queues[0]);
	return error;
}

static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
		bool remove)
{
	if (remove) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(ctrl->ctrl.tagset);
	}
	nvme_rdma_free_io_queues(ctrl);
}

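/*
 * Set up the I/O queues: allocate them, create the tag set and connect_q on
 * first setup, connect all queues, and refresh the hw queue count when
 * reconnecting.
 */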
static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
{
	int ret;

	ret = nvme_rdma_alloc_io_queues(ctrl);
	if (ret)
		return ret;

	if (new) {
		ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
		if (IS_ERR(ctrl->ctrl.tagset)) {
			ret = PTR_ERR(ctrl->ctrl.tagset);
			goto out_free_io_queues;
		}

		ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
		if (IS_ERR(ctrl->ctrl.connect_q)) {
			ret = PTR_ERR(ctrl->ctrl.connect_q);
			goto out_free_tag_set;
		}
	}

	ret = nvme_rdma_start_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	if (!new) {
		nvme_start_queues(&ctrl->ctrl);
		if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
			/*
			 * If we timed out waiting for freeze we are likely to
			 * be stuck.  Fail the controller initialization just
			 * to be safe.
			 */
			ret = -ENODEV;
			goto out_wait_freeze_timed_out;
		}
		blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
			ctrl->ctrl.queue_count - 1);
		nvme_unfreeze(&ctrl->ctrl);
	}

	return 0;

out_wait_freeze_timed_out:
	nvme_stop_queues(&ctrl->ctrl);
	nvme_rdma_stop_io_queues(ctrl);
out_cleanup_connect_q:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	if (new)
		blk_mq_free_tag_set(ctrl->ctrl.tagset);
out_free_io_queues:
	nvme_rdma_free_io_queues(ctrl);
	return ret;
}

static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool remove)
{
	mutex_lock(&ctrl->teardown_lock);
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	if (ctrl->ctrl.admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
			nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
	}
	if (remove)
		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_rdma_destroy_admin_queue(ctrl, remove);
	mutex_unlock(&ctrl->teardown_lock);
}

static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
		bool remove)
{
	mutex_lock(&ctrl->teardown_lock);
	if (ctrl->ctrl.queue_count > 1) {
		nvme_start_freeze(&ctrl->ctrl);
		nvme_stop_queues(&ctrl->ctrl);
		nvme_rdma_stop_io_queues(ctrl);
		if (ctrl->ctrl.tagset) {
			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
				nvme_cancel_request, &ctrl->ctrl);
			blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
		}
		if (remove)
			nvme_start_queues(&ctrl->ctrl);
		nvme_rdma_destroy_io_queues(ctrl, remove);
	}
	mutex_unlock(&ctrl->teardown_lock);
}

static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}

static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
			ctrl->ctrl.state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(&ctrl->ctrl)) {
		dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
			ctrl->ctrl.opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
				ctrl->ctrl.opts->reconnect_delay * HZ);
	} else {
		nvme_delete_ctrl(&ctrl->ctrl);
	}
}

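/*
 * Common setup path for create and reconnect: bring up the admin queue,
 * validate controller capabilities, then bring up the I/O queues and move
 * the controller to LIVE.
 */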
static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
	int ret = -EINVAL;
	bool changed;

	ret = nvme_rdma_configure_admin_queue(ctrl, new);
	if (ret)
		return ret;

	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
		goto destroy_admin;
	}

	if (!(ctrl->ctrl.sgls & (1 << 2))) {
		dev_err(ctrl->ctrl.device,
			"Mandatory keyed sgls are not supported!\n");
		goto destroy_admin;
	}

	if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
	}

	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
		dev_warn(ctrl->ctrl.device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
		ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
	}

	if (ctrl->ctrl.sgls & (1 << 20))
		ctrl->use_inline_data = true;

	if (ctrl->ctrl.queue_count > 1) {
		ret = nvme_rdma_configure_io_queues(ctrl, new);
		if (ret)
			goto destroy_admin;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	if (!changed) {
		/*
		 * state change failure is ok if we started ctrl delete,
		 * unless we're in the middle of creating a new controller to
		 * avoid races with teardown flow.
		 */
		WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
			     ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
		WARN_ON_ONCE(new);
		ret = -EINVAL;
		goto destroy_io;
	}

	nvme_start_ctrl(&ctrl->ctrl);
	return 0;

destroy_io:
	if (ctrl->ctrl.queue_count > 1)
		nvme_rdma_destroy_io_queues(ctrl, new);
destroy_admin:
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_rdma_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_rdma_ctrl, reconnect_work);

	++ctrl->ctrl.nr_reconnects;

	if (nvme_rdma_setup_ctrl(ctrl, false))
		goto requeue;

	dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
			ctrl->ctrl.nr_reconnects);

	ctrl->ctrl.nr_reconnects = 0;

	return;

requeue:
	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
			ctrl->ctrl.nr_reconnects);
	nvme_rdma_reconnect_or_remove(ctrl);
}

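/*
 * Error recovery: tear down all queues (failing inflight requests), then
 * move the controller to CONNECTING and let the reconnect logic take over.
 */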
static void nvme_rdma_error_recovery_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
			struct nvme_rdma_ctrl, err_work);

	nvme_stop_keep_alive(&ctrl->ctrl);
	nvme_rdma_teardown_io_queues(ctrl, false);
	nvme_start_queues(&ctrl->ctrl);
	nvme_rdma_teardown_admin_queue(ctrl, false);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
			     ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	nvme_rdma_reconnect_or_remove(ctrl);
}

static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &ctrl->err_work);
}

static void nvme_rdma_end_request(struct nvme_rdma_request *req)
{
	struct request *rq = blk_mq_rq_from_pdu(req);

	if (!refcount_dec_and_test(&req->ref))
		return;
	if (!nvme_try_complete_req(rq, req->status, req->result))
		nvme_rdma_complete_rq(rq);
}

static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
		const char *op)
{
	struct nvme_rdma_queue *queue = wc->qp->qp_context;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		dev_info(ctrl->ctrl.device,
			     "%s for CQE 0x%p failed with status %s (%d)\n",
			     op, wc->wr_cqe,
			     ib_wc_status_msg(wc->status), wc->status);
	nvme_rdma_error_recovery(ctrl);
}

static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "MEMREG");
}

static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvme_rdma_request *req =
		container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
	else
		nvme_rdma_end_request(req);
}

static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req)
{
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = IB_SEND_SIGNALED,
		.ex.invalidate_rkey = req->mr->rkey,
	};

	req->reg_cqe.done = nvme_rdma_inv_rkey_done;
	wr.wr_cqe = &req->reg_cqe;

	return ib_post_send(queue->qp, &wr, NULL);
}

static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
		struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	struct list_head *pool = &queue->qp->rdma_mrs;

	if (!blk_rq_nr_phys_segments(rq))
		return;

	if (blk_integrity_rq(rq)) {
		ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
				req->metadata_sgl->nents, rq_dma_dir(rq));
		sg_free_table_chained(&req->metadata_sgl->sg_table,
				      NVME_INLINE_METADATA_SG_CNT);
	}

	if (req->use_sig_mr)
		pool = &queue->qp->sig_mrs;

	if (req->mr) {
		ib_mr_pool_put(queue->qp, pool, req->mr);
		req->mr = NULL;
	}

	ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
			rq_dma_dir(rq));
	sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
}

static int nvme_rdma_set_sg_null(struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = 0;
	put_unaligned_le24(0, sg->length);
	put_unaligned_le32(0, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}

static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
	struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
	struct ib_sge *sge = &req->sge[1];
	u32 len = 0;
	int i;

	for (i = 0; i < count; i++, sgl++, sge++) {
		sge->addr = sg_dma_address(sgl);
		sge->length = sg_dma_len(sgl);
		sge->lkey = queue->device->pd->local_dma_lkey;
		len += sge->length;
	}

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;

	req->num_sge += count;
	return 0;
}

static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;

	sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
	put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
	put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
	return 0;
}

static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
	int nr;

	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
	if (WARN_ON_ONCE(!req->mr))
		return -EAGAIN;

	/*
	 * Align the MR to a 4K page size to match the ctrl page size and
	 * the block virtual boundary.
	 */
	nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
			  SZ_4K);
	if (unlikely(nr < count)) {
		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
		req->mr = NULL;
		if (nr < 0)
			return nr;
		return -EINVAL;
	}

	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));

	req->reg_cqe.done = nvme_rdma_memreg_done;
	memset(&req->reg_wr, 0, sizeof(req->reg_wr));
	req->reg_wr.wr.opcode = IB_WR_REG_MR;
	req->reg_wr.wr.wr_cqe = &req->reg_cqe;
	req->reg_wr.wr.num_sge = 0;
	req->reg_wr.mr = req->mr;
	req->reg_wr.key = req->mr->rkey;
	req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			     IB_ACCESS_REMOTE_READ |
			     IB_ACCESS_REMOTE_WRITE;

	sg->addr = cpu_to_le64(req->mr->iova);
	put_unaligned_le24(req->mr->length, sg->length);
	put_unaligned_le32(req->mr->rkey, sg->key);
	sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
			NVME_SGL_FMT_INVALIDATE;

	return 0;
}

static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
		struct nvme_command *cmd, struct ib_sig_domain *domain,
		u16 control, u8 pi_type)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = 1 << bi->interval_exp;
	domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
	if (control & NVME_RW_PRINFO_PRCHK_REF)
		domain->sig.dif.ref_remap = true;

	domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
	domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
	domain->sig.dif.app_escape = true;
	if (pi_type == NVME_NS_DPS_PI_TYPE3)
		domain->sig.dif.ref_escape = true;
}

static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
		struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
		u8 pi_type)
{
	u16 control = le16_to_cpu(cmd->rw.control);

	memset(sig_attrs, 0, sizeof(*sig_attrs));
	if (control & NVME_RW_PRINFO_PRACT) {
		/* for WRITE_INSERT/READ_STRIP no memory domain */
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
					 pi_type);
		/* Clear the PRACT bit since HCA will generate/verify the PI */
		control &= ~NVME_RW_PRINFO_PRACT;
		cmd->rw.control = cpu_to_le16(control);
	} else {
		/* for WRITE_PASS/READ_PASS both wire/memory domains exist */
		nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
					 pi_type);
		nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					 pi_type);
	}
}

static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
{
	*mask = 0;
	if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
		*mask |= IB_SIG_CHECK_REFTAG;
	if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
		*mask |= IB_SIG_CHECK_GUARD;
}

static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "SIG");
}

static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count, int pi_count)
{
	struct nvme_rdma_sgl *sgl = &req->data_sgl;
	struct ib_reg_wr *wr = &req->reg_wr;
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct nvme_ns *ns = rq->q->queuedata;
	struct bio *bio = rq->bio;
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
	int nr;

	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
	if (WARN_ON_ONCE(!req->mr))
		return -EAGAIN;

	nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
			     req->metadata_sgl->sg_table.sgl, pi_count, NULL,
			     SZ_4K);
	if (unlikely(nr))
		goto mr_put;

	nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c,
				req->mr->sig_attrs, ns->pi_type);
	nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);

	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));

	req->reg_cqe.done = nvme_rdma_sig_done;
	memset(wr, 0, sizeof(*wr));
	wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
	wr->wr.wr_cqe = &req->reg_cqe;
	wr->wr.num_sge = 0;
	wr->wr.send_flags = 0;
	wr->mr = req->mr;
	wr->key = req->mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE;

	sg->addr = cpu_to_le64(req->mr->iova);
	put_unaligned_le24(req->mr->length, sg->length);
	put_unaligned_le32(req->mr->rkey, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;

	return 0;

mr_put:
	ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
	req->mr = NULL;
	if (nr < 0)
		return nr;
	return -EINVAL;
}

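/*
 * DMA-map the request's data (and integrity metadata) and build the command's
 * data descriptor: inline SGEs, a single keyed SGL, an MR registration, or a
 * PI-protected MR registration.
 */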
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
		struct request *rq, struct nvme_command *c)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int pi_count = 0;
	int count, ret;

	req->num_sge = 1;
	refcount_set(&req->ref, 2); /* send and recv completions */

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		return nvme_rdma_set_sg_null(c);

	req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
	ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
			blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
			NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;

	req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
					    req->data_sgl.sg_table.sgl);

	count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
			      req->data_sgl.nents, rq_dma_dir(rq));
	if (unlikely(count <= 0)) {
		ret = -EIO;
		goto out_free_table;
	}

	if (blk_integrity_rq(rq)) {
		req->metadata_sgl->sg_table.sgl =
			(struct scatterlist *)(req->metadata_sgl + 1);
		ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
				blk_rq_count_integrity_sg(rq->q, rq->bio),
				req->metadata_sgl->sg_table.sgl,
				NVME_INLINE_METADATA_SG_CNT);
		if (unlikely(ret)) {
			ret = -ENOMEM;
			goto out_unmap_sg;
		}

		req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
				rq->bio, req->metadata_sgl->sg_table.sgl);
		pi_count = ib_dma_map_sg(ibdev,
					 req->metadata_sgl->sg_table.sgl,
					 req->metadata_sgl->nents,
					 rq_dma_dir(rq));
		if (unlikely(pi_count <= 0)) {
			ret = -EIO;
			goto out_free_pi_table;
		}
	}

	if (req->use_sig_mr) {
		ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
		goto out;
	}

	if (count <= dev->num_inline_segments) {
		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
		    queue->ctrl->use_inline_data &&
		    blk_rq_payload_bytes(rq) <=
				nvme_rdma_inline_data_size(queue)) {
			ret = nvme_rdma_map_sg_inline(queue, req, c, count);
			goto out;
		}

		if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
			ret = nvme_rdma_map_sg_single(queue, req, c);
			goto out;
		}
	}

	ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
	if (unlikely(ret))
		goto out_unmap_pi_sg;

	return 0;

out_unmap_pi_sg:
	if (blk_integrity_rq(rq))
		ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
				req->metadata_sgl->nents, rq_dma_dir(rq));
out_free_pi_table:
	if (blk_integrity_rq(rq))
		sg_free_table_chained(&req->metadata_sgl->sg_table,
				      NVME_INLINE_METADATA_SG_CNT);
out_unmap_sg:
	ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
			rq_dma_dir(rq));
out_free_table:
	sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
	return ret;
}

static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvme_rdma_qe *qe =
		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
	struct nvme_rdma_request *req =
		container_of(qe, struct nvme_rdma_request, sqe);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "SEND");
	else
		nvme_rdma_end_request(req);
}

static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
		struct ib_send_wr *first)
{
	struct ib_send_wr wr;
	int ret;

	sge->addr   = qe->dma;
	sge->length = sizeof(struct nvme_command);
	sge->lkey   = queue->device->pd->local_dma_lkey;

	wr.next       = NULL;
	wr.wr_cqe     = &qe->cqe;
	wr.sg_list    = sge;
	wr.num_sge    = num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	if (first)
		first->next = &wr;
	else
		first = &wr;

	ret = ib_post_send(queue->qp, first, NULL);
	if (unlikely(ret)) {
		dev_err(queue->ctrl->ctrl.device,
			     "%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe)
{
	struct ib_recv_wr wr;
	struct ib_sge list;
	int ret;

	list.addr   = qe->dma;
	list.length = sizeof(struct nvme_completion);
	list.lkey   = queue->device->pd->local_dma_lkey;

	qe->cqe.done = nvme_rdma_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &qe->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(queue->qp, &wr, NULL);
	if (unlikely(ret)) {
		dev_err(queue->ctrl->ctrl.device,
			"%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
{
	u32 queue_idx = nvme_rdma_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "ASYNC");
}

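/*
 * AEN commands have no struct request behind them; post the pre-allocated
 * async event SQE directly on the admin queue.
 */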
static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
	struct nvme_rdma_queue *queue = &ctrl->queues[0];
	struct ib_device *dev = queue->device->dev;
	struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
	struct nvme_command *cmd = sqe->data;
	struct ib_sge sge;
	int ret;

	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);

	memset(cmd, 0, sizeof(*cmd));
	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_rdma_set_sg_null(cmd);

	sqe->cqe.done = nvme_rdma_async_done;

	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
			DMA_TO_DEVICE);

	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
	WARN_ON_ONCE(ret);
}

static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
		struct nvme_completion *cqe, struct ib_wc *wc)
{
	struct request *rq;
	struct nvme_rdma_request *req;

	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"tag 0x%x on QP %#x not found\n",
			cqe->command_id, queue->qp->qp_num);
		nvme_rdma_error_recovery(queue->ctrl);
		return;
	}
	req = blk_mq_rq_to_pdu(rq);

	req->status = cqe->status;
	req->result = cqe->result;

	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		if (unlikely(!req->mr ||
			     wc->ex.invalidate_rkey != req->mr->rkey)) {
			dev_err(queue->ctrl->ctrl.device,
				"Bogus remote invalidation for rkey %#x\n",
				req->mr ? req->mr->rkey : 0);
			nvme_rdma_error_recovery(queue->ctrl);
		}
	} else if (req->mr) {
		int ret;

		ret = nvme_rdma_inv_rkey(queue, req);
		if (unlikely(ret < 0)) {
			dev_err(queue->ctrl->ctrl.device,
				"Queueing INV WR for rkey %#x failed (%d)\n",
				req->mr->rkey, ret);
			nvme_rdma_error_recovery(queue->ctrl);
		}
		/* the local invalidation completion will end the request */
		return;
	}

	nvme_rdma_end_request(req);
}

static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvme_rdma_qe *qe =
		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
	struct nvme_rdma_queue *queue = wc->qp->qp_context;
	struct ib_device *ibdev = queue->device->dev;
	struct nvme_completion *cqe = qe->data;
	const size_t len = sizeof(struct nvme_completion);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvme_rdma_wr_error(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		nvme_rdma_process_nvme_rsp(queue, cqe, wc);
	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);

	nvme_rdma_post_recv(queue, qe);
}

static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
{
	int ret, i;

	for (i = 0; i < queue->queue_size; i++) {
		ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
		if (ret)
			goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
		struct rdma_cm_event *ev)
{
	struct rdma_cm_id *cm_id = queue->cm_id;
	int status = ev->status;
	const char *rej_msg;
	const struct nvme_rdma_cm_rej *rej_data;
	u8 rej_data_len;

	rej_msg = rdma_reject_msg(cm_id, status);
	rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);

	if (rej_data && rej_data_len >= sizeof(u16)) {
		u16 sts = le16_to_cpu(rej_data->sts);
1820 1821

		dev_err(queue->ctrl->ctrl.device,
1822 1823
		      "Connect rejected: status %d (%s) nvme status %d (%s).\n",
		      status, rej_msg, sts, nvme_rdma_cm_msg(sts));
	} else {
		dev_err(queue->ctrl->ctrl.device,
			"Connect rejected: status %d (%s).\n", status, rej_msg);
	}

	return -ECONNRESET;
}

static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
{
	struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
	int ret;

	ret = nvme_rdma_create_queue_ib(queue);
	if (ret)
		return ret;

	if (ctrl->opts->tos >= 0)
		rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
	ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
	if (ret) {
		dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
			ret);
		goto out_destroy_queue;
	}

	return 0;

out_destroy_queue:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_req priv = { };
	int ret;

	param.qp_num = queue->qp->qp_num;
	param.flow_control = 1;

	param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
	/* maximum retry count */
	param.retry_count = 7;
	param.rnr_retry_count = 7;
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);

	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
	/*
	 * set the admin queue depth to the minimum size
	 * specified by the Fabrics standard.
	 */
	if (priv.qid == 0) {
		priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
		priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
	} else {
		/*
		 * The current interpretation of the Fabrics spec is
		 * that hrqsize must be at least sqsize + 1, i.e. the
		 * 1's based representation of sqsize.
		 */
		priv.hrqsize = cpu_to_le16(queue->queue_size);
		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
	}
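	/*
	 * Worked example with hypothetical values: for opts->queue_size = 128
	 * an I/O queue sends hrqsize = 128 and hsqsize = 127 (sqsize is
	 * 0's based), while the admin queue always sends
	 * hrqsize = NVME_AQ_DEPTH and hsqsize = NVME_AQ_DEPTH - 1.
	 */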

	ret = rdma_connect(queue->cm_id, &param);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"rdma_connect failed (%d).\n", ret);
		goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *ev)
{
	struct nvme_rdma_queue *queue = cm_id->context;
	int cm_error = 0;

	dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
		rdma_event_msg(ev->event), ev->event,
		ev->status, cm_id);

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_error = nvme_rdma_addr_resolved(queue);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_error = nvme_rdma_route_resolved(queue);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		queue->cm_error = nvme_rdma_conn_established(queue);
		/* complete cm_done regardless of success/failure */
		complete(&queue->cm_done);
		return 0;
	case RDMA_CM_EVENT_REJECTED:
		cm_error = nvme_rdma_conn_rejected(queue, ev);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		nvme_rdma_destroy_queue_ib(queue);
		fallthrough;
	case RDMA_CM_EVENT_ADDR_ERROR:
		dev_dbg(queue->ctrl->ctrl.device,
			"CM error event %d\n", ev->event);
		cm_error = -ECONNRESET;
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		dev_dbg(queue->ctrl->ctrl.device,
			"disconnect received - connection closed\n");
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* device removal is handled via the ib_client API */
		break;
	default:
		dev_err(queue->ctrl->ctrl.device,
			"Unexpected RDMA CM event (%d)\n", ev->event);
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	}

	if (cm_error) {
		queue->cm_error = cm_error;
		complete(&queue->cm_done);
	}

	return 0;
}
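
/*
 * Connection establishment sequence handled above, as a rough sketch
 * (rdma_resolve_addr() is issued when the queue is allocated, earlier in
 * this file):
 *
 *	rdma_resolve_addr()  -> ADDR_RESOLVED  -> create the queue's IB
 *	                                          resources, rdma_resolve_route()
 *	                     -> ROUTE_RESOLVED -> rdma_connect() carrying the
 *	                                          NVMe/RDMA CM private data
 *	                     -> ESTABLISHED    -> post one recv per queue entry,
 *	                                          complete cm_done
 *
 * Every error path stores a negative cm_error and completes cm_done so the
 * allocation path waiting on it can fail the connect attempt.
 */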

static void nvme_rdma_complete_timed_out(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	/* fence other contexts that may complete the command */
	mutex_lock(&ctrl->teardown_lock);
	nvme_rdma_stop_queue(queue);
	if (!blk_mq_request_completed(rq)) {
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);
	}
	mutex_unlock(&ctrl->teardown_lock);
}

static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
		 rq->tag, nvme_rdma_queue_idx(queue));

	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_rdma_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_rdma_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}
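
/*
 * Note: returning BLK_EH_RESET_TIMER above re-arms the block layer timer
 * rather than completing the request; the error recovery work scheduled by
 * nvme_rdma_error_recovery() is then expected to cancel it, which avoids a
 * double completion from the timeout path.
 */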

static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_qe *sqe = &req->sqe;
	struct nvme_command *c = sqe->data;
	struct ib_device *dev;
	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
	blk_status_t ret;
	int err;

	WARN_ON_ONCE(rq->tag < 0);

	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	dev = queue->device->dev;

	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
					 sizeof(struct nvme_command),
					 DMA_TO_DEVICE);
	err = ib_dma_mapping_error(dev, req->sqe.dma);
	if (unlikely(err))
		return BLK_STS_RESOURCE;

	ib_dma_sync_single_for_cpu(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	ret = nvme_setup_cmd(ns, rq, c);
	if (ret)
		goto unmap_qe;

	blk_mq_start_request(rq);

	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
	    queue->pi_support &&
	    (c->common.opcode == nvme_cmd_write ||
	     c->common.opcode == nvme_cmd_read) &&
	    nvme_ns_has_pi(ns))
		req->use_sig_mr = true;
	else
		req->use_sig_mr = false;

	err = nvme_rdma_map_data(queue, rq, c);
	if (unlikely(err < 0)) {
		dev_err(queue->ctrl->ctrl.device,
			     "Failed to map data (%d)\n", err);
		goto err;
	}

	sqe->cqe.done = nvme_rdma_send_done;

	ib_dma_sync_single_for_device(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
			req->mr ? &req->reg_wr.wr : NULL);
	if (unlikely(err))
		goto err_unmap;

	return BLK_STS_OK;

err_unmap:
	nvme_rdma_unmap_data(queue, rq);
err:
	if (err == -ENOMEM || err == -EAGAIN)
		ret = BLK_STS_RESOURCE;
	else
		ret = BLK_STS_IOERR;
	nvme_cleanup_cmd(rq);
unmap_qe:
	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
	return ret;
}
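
/*
 * Submission path above, as a rough sketch of the ordering that matters:
 *
 *	map the sqe for DMA          (ib_dma_map_single)
 *	sync it for CPU access       (ib_dma_sync_single_for_cpu)
 *	build the command            (nvme_setup_cmd)
 *	blk_mq_start_request()
 *	map the data + optional MR   (nvme_rdma_map_data)
 *	sync the sqe for the device  (ib_dma_sync_single_for_device)
 *	post the send, chaining the MR registration WR first when one is used
 *
 * -ENOMEM/-EAGAIN from mapping are reported as BLK_STS_RESOURCE so the
 * block layer retries instead of failing the I/O.
 */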

static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_rdma_queue *queue = hctx->driver_data;

	return ib_process_cq_direct(queue->ib_cq, -1);
}

static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		nvme_req(rq)->status = NVME_SC_INVALID_PI;
		return;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
			break;
		}
		pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type, mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	}
}

static void nvme_rdma_complete_rq(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;
	struct ib_device *ibdev = queue->device->dev;

	if (req->use_sig_mr)
		nvme_rdma_check_pi_status(req);

	nvme_rdma_unmap_data(queue, rq);
	ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
	nvme_complete_rq(rq);
}

static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
			ctrl->device->dev, 0);
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
			ctrl->device->dev, 0);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}
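
/*
 * Worked example with hypothetical options: nr_io_queues=4, nr_write_queues=4,
 * nr_poll_queues=2 and a target granting enough queues would typically yield
 *
 *	HCTX_TYPE_DEFAULT: 4 queues, offset 0   (write path)
 *	HCTX_TYPE_READ:    4 queues, offset 4
 *	HCTX_TYPE_POLL:    2 queues, offset 8   (mapped by blk_mq_map_queues)
 *
 * i.e. the "mapped 4/4/2 default/read/poll queues" message above.
 */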

static const struct blk_mq_ops nvme_rdma_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.init_hctx	= nvme_rdma_init_hctx,
	.timeout	= nvme_rdma_timeout,
	.map_queues	= nvme_rdma_map_queues,
	.poll		= nvme_rdma_poll,
};

static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.init_hctx	= nvme_rdma_init_admin_hctx,
	.timeout	= nvme_rdma_timeout,
};

static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->reconnect_work);

	nvme_rdma_teardown_io_queues(ctrl, shutdown);
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	if (shutdown)
		nvme_shutdown_ctrl(&ctrl->ctrl);
	else
		nvme_disable_ctrl(&ctrl->ctrl);
	nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
}

static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl =
		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_rdma_shutdown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	if (nvme_rdma_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->ctrl.nr_reconnects;
	nvme_rdma_reconnect_or_remove(ctrl);
}

static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
	.name			= "rdma",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_rdma_free_ctrl,
	.submit_async_event	= nvme_rdma_submit_async_event,
	.delete_ctrl		= nvme_rdma_delete_ctrl,
	.get_address		= nvmf_get_address,
};

/*
 * Fails a connection request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
 *
 * if local address is not specified in the request, it will match an
 * existing controller with all the other parameters the same and no
 * local port address specified as well.
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	int ret;
	bool changed;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);
	mutex_init(&ctrl->teardown_lock);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	INIT_DELAYED_WORK(&ctrl->reconnect_work,
			nvme_rdma_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);

	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ctrl;

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_kfree_queues;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
	WARN_ON_ONCE(!changed);

	ret = nvme_rdma_setup_ctrl(ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_rdma_transport = {
	.name		= "rdma",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS,
	.create_ctrl	= nvme_rdma_create_ctrl,
};
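
/*
 * For illustration only (addresses and NQN are made up): a controller is
 * typically created through this transport with something like
 *
 *	nvme connect -t rdma -a 192.168.1.10 -s 4420 \
 *		-n nqn.2014-08.org.example:nvme:target1
 *
 * where the trsvcid (-s) falls back to NVME_RDMA_IP_PORT when not given,
 * as handled in nvme_rdma_create_ctrl() above.
 */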

static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvme_rdma_ctrl *ctrl;
	struct nvme_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->dev == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/* Delete all controllers using this device */
	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		if (ctrl->device->dev != ib_device)
			continue;
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

static struct ib_client nvme_rdma_ib_client = {
	.name   = "nvme_rdma",
	.remove = nvme_rdma_remove_one
};

static int __init nvme_rdma_init_module(void)
{
	int ret;

	ret = ib_register_client(&nvme_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_rdma_transport);
	if (ret)
		goto err_unreg_client;

	return 0;

err_unreg_client:
	ib_unregister_client(&nvme_rdma_ib_client);
	return ret;
}

static void __exit nvme_rdma_cleanup_module(void)
{
	struct nvme_rdma_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_rdma_transport);
	ib_unregister_client(&nvme_rdma_ib_client);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_rdma_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);

MODULE_LICENSE("GPL v2");