// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

struct nvmet_rdma_cmd {
	struct ib_sge		sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe		cqe;
	struct ib_recv_wr	wr;
	struct scatterlist	inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command     *nvme_cmd;
	struct nvmet_rdma_queue	*queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge		send_sge;
	struct ib_cqe		send_cqe;
	struct ib_send_wr	send_wr;

	struct nvmet_rdma_cmd	*cmd;
	struct nvmet_rdma_queue	*queue;

	struct ib_cqe		read_cqe;
	struct rdma_rw_ctx	rw;

	struct nvmet_req	req;

	bool			allocated;
	u8			n_rdma;
	u32			flags;
	u32			invalidate_rkey;

	struct list_head	wait_list;
	struct list_head	free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
	int			inline_data_size;
	int			inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

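/* Smallest number of PAGE_SIZE pages needed to hold len bytes (len must be > 0). */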
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
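		/*
		 * The pre-allocated rsp pool is empty; fall back to a dynamic
		 * allocation and flag it so nvmet_rdma_put_rsp() frees it
		 * instead of returning it to the pool.
		 */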
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
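	/* One SGE for the command capsule plus one per inline data page (I/O queues only). */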
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
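	/*
	 * Allocate twice the recv queue depth: a command's RECV is re-posted
	 * before its response SEND completes, so up to 2 * recv_queue_size
	 * responses can be outstanding at once.
	 */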
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

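	/* Post to the device-wide SRQ when enabled, otherwise to this queue's own receive queue. */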
	if (ndev->srq)
		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}


static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgl(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case of an admin
		 * connect error, so just disconnect and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

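	/* Data-in transfer finished successfully; execute the command now. */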
	rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
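	/* Build an SG list over the pre-mapped inline pages; the data offset applies only to the first page. */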
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	ret = nvmet_req_alloc_sgl(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (unlikely(ret < 0))
		goto error_out;
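	/* rdma_rw_ctx_init() returned the number of send-queue WRs the data transfer will consume. */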
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
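		/*
		 * Not enough send-queue slots for the response SEND plus the
		 * n_rdma data-transfer WRs: return the credits and let the
		 * caller park the request on rsp_wr_wait_list.
		 */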
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
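		/*
		 * Commands that arrive while the queue is still connecting are
		 * parked on rsp_wait_list and replayed from
		 * nvmet_rdma_queue_established().
		 */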
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++) {
		ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return 0;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_port *port = cm_id->context;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
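	/* Reuse an existing entry for this HCA (matched by node GUID) if it is still alive. */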
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(port->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			port->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		port->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = port->inline_data_size;
	ndev->inline_page_count = inline_page_count;
	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp *qp = queue->cm_id->qp;

	ib_drain_qp(qp);
	rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(qp);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
				enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param  param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}
	queue->port = cm_id->context;

	if (queue->host_qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret) {
		schedule_work(&queue->release_work);
		/* Destroying rdma_cm id is not needed here */
		return 0;
	}

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:      nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device-bound listener cm_id (in which case
 * queue will be NULL).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction, and destroy
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. We have registered an ib_client to
		 * handle queue removal, so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->priv, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID.  What a great API design..
	 */
	return 1;
}

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

static int nvmet_rdma_add_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_storage addr = { };
	__kernel_sa_family_t af;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				port->disc_addr.adrfam);
		return -EINVAL;
	}

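	/* A negative inline_data_size selects the default; values above the maximum are clamped. */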
	if (port->inline_data_size < 0) {
		port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
	} else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
		pr_warn("inline_data_size %u is too large, reducing to %u\n",
			port->inline_data_size,
			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
		port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
	}

	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
			port->disc_addr.trsvcid, &addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			port->disc_addr.traddr, port->disc_addr.trsvcid);
		return ret;
	}

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret) {
		pr_err("rdma_set_afonly failed (%d)\n", ret);
		goto out_destroy_id;
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
	if (ret) {
		pr_err("binding CM ID to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpcs)\n",
		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
}

static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *port, char *traddr)
{
	struct rdma_cm_id *cm_id = port->priv;

	if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
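		/*
		 * The port is listening on a wildcard address; report the
		 * address the host actually connected to instead.
		 */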
		struct nvmet_rdma_rsp *rsp =
			container_of(req, struct nvmet_rdma_rsp, req);
		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
		struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;

		sprintf(traddr, "%pISc", addr);
	} else {
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
	.disc_traddr		= nvmet_rdma_disc_port_addr,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvmet_rdma_queue *queue, *tmp;
	struct nvmet_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/*
	 * IB Device that is used by nvmet controllers is being removed,
	 * delete all queues using this device.
	 */
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->dev->device != ib_device)
			continue;

		pr_info("Removing queue %d\n", queue->idx);
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
}

static struct ib_client nvmet_rdma_ib_client = {
	.name   = "nvmet_rdma",
	.remove = nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	nvmet_unregister_transport(&nvmet_rdma_ops);
	ib_unregister_client(&nvmet_rdma_ib_client);
	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */