// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization.  Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
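/*
 * Example usage (a sketch; assumes the module is loaded as nvme-tcp and
 * shows up in sysfs as nvme_tcp):
 *   modprobe nvme-tcp so_priority=6
 * or adjust at runtime via /sys/module/nvme_tcp/parameters/so_priority.
 */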

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request	req;
	void			*pdu;
	struct nvme_tcp_queue	*queue;
	u32			data_len;
	u32			pdu_len;
	u32			pdu_sent;
	u16			ttag;
	struct list_head	entry;
	struct llist_node	lentry;
	__le32			ddgst;

	struct bio		*curr_bio;
	struct iov_iter		iter;

	/* send state */
	size_t			offset;
	size_t			data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED	= 0,
	NVME_TCP_Q_LIVE		= 1,
	NVME_TCP_Q_POLLING	= 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket		*sock;
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
	int			pdu_remaining;
	int			pdu_offset;
	size_t			data_remaining;
	size_t			ddgst_remaining;
	unsigned int		nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_tcp_ctrl	*ctrl;
	unsigned long		flags;
	bool			rd_enabled;

	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*rcv_hash;
	struct ahash_request	*snd_hash;
	__le32			exp_ddgst;
	__le32			recv_ddgst;

	struct page_frag_cache	pf_cache;

	void (*state_change)(struct sock *);
	void (*data_ready)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvme_tcp_ctrl {
	/* read only in the hot path */
	struct nvme_tcp_queue	*queues;
	struct blk_mq_tag_set	tag_set;

	/* other member variables */
	struct list_head	list;
	struct blk_mq_tag_set	admin_tag_set;
	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;
	struct nvme_ctrl	ctrl;

	struct work_struct	err_work;
	struct delayed_work	connect_work;
	struct nvme_tcp_request async_req;
	u32			io_queues[HCTX_MAX_TYPES];
};

static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
static const struct blk_mq_ops nvme_tcp_mq_ops;
static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
	u32 queue_idx = nvme_tcp_queue_id(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
{
	return req == &req->queue->ctrl->async_req;
}

static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
	struct request *rq;

	if (unlikely(nvme_tcp_async_req(req)))
		return false; /* async events don't have a request */

	rq = blk_mq_rq_from_pdu(req);

	return rq_data_dir(rq) == WRITE && req->data_len &&
		req->data_len <= nvme_tcp_inline_data_size(req->queue);
}

static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_page;
}

static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
{
	return req->iter.bvec->bv_offset + req->iter.iov_offset;
}

static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
	return min_t(size_t, iov_iter_single_seg_count(&req->iter),
			req->pdu_len - req->pdu_sent);
}

static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
	return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

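/*
 * Set up req->iter as a bvec iterator over the request payload: either the
 * single special payload vector (e.g. a discard) or the bvecs of the current
 * bio, starting from the bio's current completion offset.
 */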
static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we're the first on the send_list, try to send directly;
	 * otherwise queue io_work. Also, only do that if we are on the
	 * same cpu, so we don't introduce contention.
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	} else if (last) {
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
}

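/*
 * Move everything queued on the lockless req_list llist over to send_list.
 * llist_del_all() hands back entries newest-first; prepending each with
 * list_add() reverses that again, so older requests stay nearer the head.
 */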
static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}

static inline struct nvme_tcp_request *
nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;

	req = list_first_entry_or_null(&queue->send_list,
			struct nvme_tcp_request, entry);
	if (!req) {
		nvme_tcp_process_req_list(queue);
		req = list_first_entry_or_null(&queue->send_list,
				struct nvme_tcp_request, entry);
		if (unlikely(!req))
			return NULL;
	}

	list_del(&req->entry);
	return req;
}

static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
		__le32 *dgst)
{
	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
	crypto_ahash_final(hash);
}

static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
		struct page *page, off_t off, size_t len)
{
	struct scatterlist sg;

	sg_init_marker(&sg, 1);
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);
}

static inline void nvme_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
		void *pdu, size_t pdu_len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: header digest flag is cleared\n",
			nvme_tcp_queue_id(queue));
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		dev_err(queue->ctrl->ctrl.device,
			"header digest error: recv %#x expected %#x\n",
			le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
		return -EIO;
	}

	return 0;
}

static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvme_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d: data digest flag is cleared\n",
		nvme_tcp_queue_id(queue));
		return -EPROTO;
	}
	crypto_ahash_init(queue->rcv_hash);

	return 0;
}

static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	page_frag_free(req->pdu);
}

static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu;
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	req->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!req->pdu)
		return -ENOMEM;

	pdu = req->pdu;
	req->queue = queue;
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &pdu->cmd;

	return 0;
}

static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];

	hctx->driver_data = queue;
	return 0;
}

static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_tcp_ctrl *ctrl = data;
	struct nvme_tcp_queue *queue = &ctrl->queues[0];

	hctx->driver_data = queue;
	return 0;
}

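/*
 * Derive the receive state from the per-queue counters: an outstanding PDU
 * header takes precedence, then a pending data digest, otherwise payload
 * data is being received.
 */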
static enum nvme_tcp_recv_state
nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
{
	return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
		(queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
		NVME_TCP_RECV_DATA;
}

static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
{
	queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
				nvme_tcp_hdgst_len(queue);
	queue->pdu_offset = 0;
	queue->data_remaining = -1;
	queue->ddgst_remaining = 0;
}

static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return;

	dev_warn(ctrl->device, "starting error recovery\n");
	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}

static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
		struct nvme_completion *cqe)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad cqe.command_id %#x on queue %d\n",
			cqe->command_id, nvme_tcp_queue_id(queue));
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EINVAL;
	}

	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
		nvme_complete_rq(rq);
	queue->nr_cqe++;

	return 0;
}

static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
		struct nvme_tcp_data_pdu *pdu)
{
	struct request *rq;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad c2hdata.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}

	if (!blk_rq_payload_bytes(rq)) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x unexpected data\n",
			nvme_tcp_queue_id(queue), rq->tag);
		return -EIO;
	}

	queue->data_remaining = le32_to_cpu(pdu->data_length);

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
	    unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
		dev_err(queue->ctrl->ctrl.device,
			"queue %d tag %#x SUCCESS set but not last PDU\n",
			nvme_tcp_queue_id(queue), rq->tag);
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		return -EPROTO;
	}

	return 0;
}

static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
		struct nvme_tcp_rsp_pdu *pdu)
{
	struct nvme_completion *cqe = &pdu->cqe;
	int ret = 0;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
				     cqe->command_id)))
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	else
		ret = nvme_tcp_process_nvme_cqe(queue, cqe);

	return ret;
}

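/*
 * Build the H2CData PDU that answers a controller R2T: validate the
 * solicited length and offset against what was already sent, then fill in
 * the header that will precede the requested payload.
 */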
static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	u8 ddgst = nvme_tcp_ddgst_len(queue);

	req->pdu_len = le32_to_cpu(pdu->r2t_length);
	req->pdu_sent = 0;

	if (unlikely(!req->pdu_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, req->pdu_len);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, req->pdu_len, req->data_len,
			req->data_sent);
		return -EPROTO;
	}

	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, le32_to_cpu(pdu->r2t_offset),
			req->data_sent);
		return -EPROTO;
	}

	memset(data, 0, sizeof(*data));
	data->hdr.type = nvme_tcp_h2c_data;
	data->hdr.flags = NVME_TCP_F_DATA_LAST;
	if (queue->hdr_digest)
		data->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest)
		data->hdr.flags |= NVME_TCP_F_DDGST;
	data->hdr.hlen = sizeof(*data);
	data->hdr.pdo = data->hdr.hlen + hdgst;
	data->hdr.plen =
		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
	data->ttag = pdu->ttag;
	data->command_id = nvme_cid(rq);
	data->data_offset = cpu_to_le32(req->data_sent);
	data->data_length = cpu_to_le32(req->pdu_len);
	return 0;
}

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	int ret;

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false, true);

	return 0;
}

static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_hdr *hdr;
	char *pdu = queue->pdu;
	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
	int ret;

	ret = skb_copy_bits(skb, *offset,
		&pdu[queue->pdu_offset], rcv_len);
	if (unlikely(ret))
		return ret;

	queue->pdu_remaining -= rcv_len;
	queue->pdu_offset += rcv_len;
	*offset += rcv_len;
	*len -= rcv_len;
	if (queue->pdu_remaining)
		return 0;

	hdr = queue->pdu;
	if (queue->hdr_digest) {
		ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
		if (unlikely(ret))
			return ret;
	}


	if (queue->data_digest) {
		ret = nvme_tcp_check_ddgst(queue, queue->pdu);
		if (unlikely(ret))
			return ret;
	}

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
			      unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the controller
			 * sent more data than we requested, hence error.
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
					&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		dev_err(queue->ctrl->ctrl.device,
			"data digest error: recv %#x expected %#x\n",
			le32_to_cpu(queue->recv_ddgst),
			le32_to_cpu(queue->exp_ddgst));
		return -EIO;
	}

	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
					pdu->command_id);

		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
		queue->nr_cqe++;
	}

	nvme_tcp_init_recv_ctx(queue);
	return 0;
}

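/*
 * read_sock() callback: consume the skb according to the current receive
 * state (PDU header, payload data, then data digest). Any failure disables
 * further reads and triggers error recovery.
 */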
static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			dev_err(queue->ctrl->ctrl.device,
				"receive failed:  %d\n", result);
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}

	return consumed;
}

static void nvme_tcp_data_ready(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && queue->rd_enabled) &&
	    !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_write_space(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue && sk_stream_is_writeable(sk))) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvme_tcp_state_change(struct sock *sk)
{
	struct nvme_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
	case TCP_LAST_ACK:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
		break;
	default:
		dev_info(queue->ctrl->ctrl.device,
			"queue %d socket state %d\n",
			nvme_tcp_queue_id(queue), sk->sk_state);
	}

	queue->state_change(sk);
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
{
	queue->request = NULL;
}

static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
}

static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;

	while (true) {
		struct page *page = nvme_tcp_req_cur_page(req);
		size_t offset = nvme_tcp_req_cur_offset(req);
		size_t len = nvme_tcp_req_cur_length(req);
		bool last = nvme_tcp_pdu_last_send(req, len);
		int ret, flags = MSG_DONTWAIT;

		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
			flags |= MSG_EOR;
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
			return ret;

		if (queue->data_digest)
			nvme_tcp_ddgst_update(queue->snd_hash, page,
					offset, ret);

		/* fully successful last write */
		if (last && ret == len) {
			if (queue->data_digest) {
				nvme_tcp_ddgst_final(queue->snd_hash,
					&req->ddgst);
				req->state = NVME_TCP_SEND_DDGST;
				req->offset = 0;
			} else {
				nvme_tcp_done_send_req(queue);
			}
			return 1;
		}
		nvme_tcp_advance_req(req, ret);
	}
	return -EAGAIN;
}

static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
	int flags = MSG_DONTWAIT;
	int ret;

	if (inline_data || nvme_tcp_queue_more(queue))
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,  flags);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		if (inline_data) {
			req->state = NVME_TCP_SEND_DATA;
			if (queue->data_digest)
				crypto_ahash_init(queue->snd_hash);
		} else {
			nvme_tcp_done_send_req(queue);
		}
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;

	if (queue->hdr_digest && !req->offset)
		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));

	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
			offset_in_page(pdu) + req->offset, len,
			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (unlikely(ret <= 0))
		return ret;

	len -= ret;
	if (!len) {
		req->state = NVME_TCP_SEND_DATA;
		if (queue->data_digest)
			crypto_ahash_init(queue->snd_hash);
		return 1;
	}
	req->offset += ret;

	return -EAGAIN;
}

static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = &req->ddgst + req->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
	};

	if (nvme_tcp_queue_more(queue))
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
		nvme_tcp_done_send_req(queue);
		return 1;
	}

	req->offset += ret;
	return -EAGAIN;
}

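/*
 * Drive the current request through the send state machine: command PDU,
 * optional H2CData PDU, payload data and finally the data digest. Returns
 * 1 when progress was made, 0 when there is nothing to send, negative on a
 * fatal send error.
 */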
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	int ret = 1;

	if (!queue->request) {
		queue->request = nvme_tcp_fetch_request(queue);
		if (!queue->request)
			return 0;
	}
	req = queue->request;

	if (req->state == NVME_TCP_SEND_CMD_PDU) {
		ret = nvme_tcp_try_send_cmd_pdu(req);
		if (ret <= 0)
			goto done;
		if (!nvme_tcp_has_inline_data(req))
			return ret;
	}

	if (req->state == NVME_TCP_SEND_H2C_PDU) {
		ret = nvme_tcp_try_send_data_pdu(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DATA) {
		ret = nvme_tcp_try_send_data(req);
		if (ret <= 0)
			goto done;
	}

	if (req->state == NVME_TCP_SEND_DDGST)
		ret = nvme_tcp_try_send_ddgst(req);
done:
	if (ret == -EAGAIN) {
		ret = 0;
	} else if (ret < 0) {
		dev_err(queue->ctrl->ctrl.device,
			"failed to send request %d\n", ret);
		if (ret != -EPIPE && ret != -ECONNRESET)
			nvme_tcp_fail_request(queue->request);
		nvme_tcp_done_send_req(queue);
	}
	return ret;
}

static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct sock *sk = sock->sk;
	read_descriptor_t rd_desc;
	int consumed;

	rd_desc.arg.data = queue;
	rd_desc.count = 1;
	lock_sock(sk);
	queue->nr_cqe = 0;
	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
	release_sock(sk);
	return consumed;
}

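/*
 * Per-queue I/O worker: alternate between sending and receiving for up to
 * about a millisecond, then requeue itself if work is still pending so
 * other queues sharing the workqueue are not starved.
 */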
static void nvme_tcp_io_work(struct work_struct *w)
{
	struct nvme_tcp_queue *queue =
		container_of(w, struct nvme_tcp_queue, io_work);
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	do {
		bool pending = false;
		int result;

		if (mutex_trylock(&queue->send_mutex)) {
			result = nvme_tcp_try_send(queue);
			mutex_unlock(&queue->send_mutex);
			if (result > 0)
				pending = true;
			else if (unlikely(result < 0))
				break;
		} else
			pending = !llist_empty(&queue->req_list);

		result = nvme_tcp_try_recv(queue);
		if (result > 0)
			pending = true;
		else if (unlikely(result < 0))
			return;

		if (!pending)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */

	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_request *async = &ctrl->async_req;

	page_frag_free(async->pdu);
}

static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
{
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_request *async = &ctrl->async_req;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	async->pdu = page_frag_alloc(&queue->pf_cache,
		sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
		GFP_KERNEL | __GFP_ZERO);
	if (!async->pdu)
		return -ENOMEM;

	async->queue = &ctrl->queues[0];
	return 0;
}

static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
		return;

	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);

	sock_release(queue->sock);
	kfree(queue->pdu);
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
}

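/*
 * NVMe/TCP initialize connection handshake: send an ICReq PDU, receive the
 * ICResp and verify PFV, PDU length, CPDA and that the header/data digest
 * settings match what the host requested.
 */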
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq;
	struct nvme_tcp_icresp_pdu *icresp;
	struct msghdr msg = {};
	struct kvec iov;
	bool ctrl_hdgst, ctrl_ddgst;
	int ret;

	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
	if (!icreq)
		return -ENOMEM;

	icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
	if (!icresp) {
		ret = -ENOMEM;
		goto free_icreq;
	}

	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->maxr2t = 0; /* single inflight r2t supported */
	icreq->hpda = 0; /* no alignment constraint */
	if (queue->hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icreq;
	iov.iov_len = sizeof(*icreq);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_icresp;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (ret < 0)
		goto free_icresp;

	ret = -EINVAL;
	if (icresp->hdr.type != nvme_tcp_icresp) {
		pr_err("queue %d: bad type returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.type);
		goto free_icresp;
	}

	if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
		pr_err("queue %d: bad pdu length returned %d\n",
			nvme_tcp_queue_id(queue), icresp->hdr.plen);
		goto free_icresp;
	}

	if (icresp->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv returned %d\n",
			nvme_tcp_queue_id(queue), icresp->pfv);
		goto free_icresp;
	}

	ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if ((queue->data_digest && !ctrl_ddgst) ||
	    (!queue->data_digest && ctrl_ddgst)) {
		pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->data_digest ? "enabled" : "disabled",
			ctrl_ddgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	if ((queue->hdr_digest && !ctrl_hdgst) ||
	    (!queue->hdr_digest && ctrl_hdgst)) {
		pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
			nvme_tcp_queue_id(queue),
			queue->hdr_digest ? "enabled" : "disabled",
			ctrl_hdgst ? "enabled" : "disabled");
		goto free_icresp;
	}

	if (icresp->cpda != 0) {
		pr_err("queue %d: unsupported cpda returned %d\n",
			nvme_tcp_queue_id(queue), icresp->cpda);
		goto free_icresp;
	}

	ret = 0;
free_icresp:
	kfree(icresp);
free_icreq:
	kfree(icreq);
	return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
	return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			  ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);

	return !nvme_tcp_admin_queue(queue) &&
		!nvme_tcp_default_queue(queue) &&
		!nvme_tcp_read_queue(queue) &&
		qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			  ctrl->io_queues[HCTX_TYPE_READ] +
			  ctrl->io_queues[HCTX_TYPE_POLL];
}

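/*
 * Pick the CPU that runs io_work for this queue: queues of each class
 * (default, read, poll) are spread across the online CPUs according to
 * their index within that class.
 */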
static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_ctrl *ctrl = queue->ctrl;
	int qid = nvme_tcp_queue_id(queue);
	int n = 0;

	if (nvme_tcp_default_queue(queue))
		n = qid - 1;
	else if (nvme_tcp_read_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
	else if (nvme_tcp_poll_queue(queue))
		n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
				ctrl->io_queues[HCTX_TYPE_READ] - 1;
	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}

static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
		int qid, size_t queue_size)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
	int ret, rcv_pdu_size;

	mutex_init(&queue->queue_lock);
	queue->ctrl = ctrl;
	init_llist_head(&queue->req_list);
	INIT_LIST_HEAD(&queue->send_list);
	mutex_init(&queue->send_mutex);
	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
	queue->queue_size = queue_size;

	if (qid > 0)
		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command) +
						NVME_TCP_ADMIN_CCSZ;

	ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
			IPPROTO_TCP, &queue->sock);
	if (ret) {
		dev_err(nctrl->device,
			"failed to create socket: %d\n", ret);
		goto err_destroy_mutex;
	}

	/* Single syn retry */
	tcp_sock_set_syncnt(queue->sock->sk, 1);

	/* Set TCP no delay */
	tcp_sock_set_nodelay(queue->sock->sk);

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(queue->sock->sk);

	if (so_priority > 0)
		sock_set_priority(queue->sock->sk, so_priority);

	/* Set socket type of service */
	if (nctrl->opts->tos >= 0)
		ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);

	/* Set 10 seconds timeout for icresp recvmsg */
	queue->sock->sk->sk_rcvtimeo = 10 * HZ;

	queue->sock->sk->sk_allocation = GFP_ATOMIC;
	nvme_tcp_set_queue_io_cpu(queue);
	queue->request = NULL;
	queue->data_remaining = 0;
	queue->ddgst_remaining = 0;
	queue->pdu_remaining = 0;
	queue->pdu_offset = 0;
	sk_set_memalloc(queue->sock->sk);

	if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
			sizeof(ctrl->src_addr));
		if (ret) {
			dev_err(nctrl->device,
				"failed to bind queue %d socket %d\n",
				qid, ret);
			goto err_sock;
		}
	}

	if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
		char *iface = nctrl->opts->host_iface;
		sockptr_t optval = KERNEL_SOCKPTR(iface);

		ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
				      optval, strlen(iface));
		if (ret) {
			dev_err(nctrl->device,
			  "failed to bind to interface %s queue %d err %d\n",
			  iface, qid, ret);
			goto err_sock;
		}
	}

	queue->hdr_digest = nctrl->opts->hdr_digest;
	queue->data_digest = nctrl->opts->data_digest;
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvme_tcp_alloc_crypto(queue);
		if (ret) {
			dev_err(nctrl->device,
				"failed to allocate queue %d crypto\n", qid);
			goto err_sock;
		}
	}

	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
			nvme_tcp_hdgst_len(queue);
	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
	if (!queue->pdu) {
		ret = -ENOMEM;
		goto err_crypto;
	}

	dev_dbg(nctrl->device, "connecting queue %d\n",
			nvme_tcp_queue_id(queue));

	ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
		sizeof(ctrl->addr), 0);
	if (ret) {
		dev_err(nctrl->device,
			"failed to connect socket: %d\n", ret);
		goto err_rcv_pdu;
	}

	ret = nvme_tcp_init_connection(queue);
	if (ret)
		goto err_init_connect;

	queue->rd_enabled = true;
	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
	nvme_tcp_init_recv_ctx(queue);

	write_lock_bh(&queue->sock->sk->sk_callback_lock);
	queue->sock->sk->sk_user_data = queue;
	queue->state_change = queue->sock->sk->sk_state_change;
	queue->data_ready = queue->sock->sk->sk_data_ready;
	queue->write_space = queue->sock->sk->sk_write_space;
	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
#ifdef CONFIG_NET_RX_BUSY_POLL
	queue->sock->sk->sk_ll_usec = 1;
#endif
	write_unlock_bh(&queue->sock->sk->sk_callback_lock);

	return 0;

err_init_connect:
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
	kfree(queue->pdu);
err_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvme_tcp_free_crypto(queue);
err_sock:
	sock_release(queue->sock);
	queue->sock = NULL;
err_destroy_mutex:
	mutex_destroy(&queue->send_mutex);
	mutex_destroy(&queue->queue_lock);
	return ret;
}

static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data  = NULL;
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space  = queue->write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
{
	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	nvme_tcp_restore_sock_calls(queue);
	cancel_work_sync(&queue->io_work);
}

static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	mutex_lock(&queue->queue_lock);
	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		__nvme_tcp_stop_queue(queue);
	mutex_unlock(&queue->queue_lock);
}

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	int ret;

	if (idx)
		ret = nvmf_connect_io_queue(nctrl, idx);
	else
		ret = nvmf_connect_admin_queue(nctrl);

	if (!ret) {
		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
	} else {
		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
		dev_err(nctrl->device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	}
	return ret;
}

static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
		bool admin)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct blk_mq_tag_set *set;
	int ret;

	if (admin) {
		set = &ctrl->admin_tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_admin_mq_ops;
		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		set->reserved_tags = NVMF_RESERVED_TAGS;
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_BLOCKING;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = 1;
		set->timeout = NVME_ADMIN_TIMEOUT;
	} else {
		set = &ctrl->tag_set;
		memset(set, 0, sizeof(*set));
		set->ops = &nvme_tcp_mq_ops;
		set->queue_depth = nctrl->sqsize + 1;
		set->reserved_tags = NVMF_RESERVED_TAGS;
		set->numa_node = nctrl->numa_node;
		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
		set->cmd_size = sizeof(struct nvme_tcp_request);
		set->driver_data = ctrl;
		set->nr_hw_queues = nctrl->queue_count - 1;
		set->timeout = NVME_IO_TIMEOUT;
		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return set;
}

static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
		cancel_work_sync(&ctrl->async_event_work);
		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
	}

	nvme_tcp_free_queue(ctrl, 0);
}

static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_free_queue(ctrl, i);
}

static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_tcp_stop_queue(ctrl, i);
}

static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_start_queue(ctrl, i);
		if (ret)
			goto out_stop_queues;
	}

	return 0;

out_stop_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_stop_queue(ctrl, i);
	return ret;
}

static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (ret)
		return ret;

	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
	if (ret)
		goto out_free_queue;

	return 0;

out_free_queue:
	nvme_tcp_free_queue(ctrl, 0);
	return ret;
}

static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_tcp_alloc_queue(ctrl, i,
				ctrl->sqsize + 1);
		if (ret)
			goto out_free_queues;
	}

	return 0;

out_free_queues:
	for (i--; i >= 1; i--)
		nvme_tcp_free_queue(ctrl, i);

	return ret;
}

static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;

	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
	nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());

	return nr_io_queues;
}

static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
		unsigned int nr_io_queues)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvmf_ctrl_options *opts = nctrl->opts;

	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
		ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_write_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_io_queues, nr_io_queues);
		nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
	}

	if (opts->nr_poll_queues && nr_io_queues) {
		/* map dedicated poll queues only if we have queues left */
		ctrl->io_queues[HCTX_TYPE_POLL] =
			min(opts->nr_poll_queues, nr_io_queues);
	}
}

static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
	ret = nvme_set_queue_count(ctrl, &nr_io_queues);
	if (ret)
		return ret;

	if (nr_io_queues == 0) {
		dev_err(ctrl->device,
			"unable to set any I/O queues\n");
		return -ENOMEM;
	}

	ctrl->queue_count = nr_io_queues + 1;
	dev_info(ctrl->device,
		"creating %d I/O queues.\n", nr_io_queues);

	nvme_tcp_set_io_queues(ctrl, nr_io_queues);

	return __nvme_tcp_alloc_io_queues(ctrl);
}

static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_io_queues(ctrl);
	if (remove) {
		blk_cleanup_queue(ctrl->connect_q);
		blk_mq_free_tag_set(ctrl->tagset);
	}
	nvme_tcp_free_io_queues(ctrl);
}

static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
	int ret;

	ret = nvme_tcp_alloc_io_queues(ctrl);
	if (ret)
		return ret;

	if (new) {
		ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
		if (IS_ERR(ctrl->tagset)) {
			ret = PTR_ERR(ctrl->tagset);
			goto out_free_io_queues;
		}

		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
		if (IS_ERR(ctrl->connect_q)) {
			ret = PTR_ERR(ctrl->connect_q);
			goto out_free_tag_set;
		}
	}

	ret = nvme_tcp_start_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	if (!new) {
		nvme_start_queues(ctrl);
		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
			/*
			 * If we timed out waiting for freeze we are likely to
			 * be stuck.  Fail the controller initialization just
			 * to be safe.
			 */
			ret = -ENODEV;
			goto out_wait_freeze_timed_out;
		}
		blk_mq_update_nr_hw_queues(ctrl->tagset,
			ctrl->queue_count - 1);
		nvme_unfreeze(ctrl);
	}

	return 0;

out_wait_freeze_timed_out:
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
	nvme_cancel_tagset(ctrl);
	if (new)
		blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
	if (new)
		blk_mq_free_tag_set(ctrl->tagset);
out_free_io_queues:
	nvme_tcp_free_io_queues(ctrl);
	return ret;
}

static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
	nvme_tcp_stop_queue(ctrl, 0);
	if (remove) {
		blk_cleanup_queue(ctrl->admin_q);
		blk_cleanup_queue(ctrl->fabrics_q);
		blk_mq_free_tag_set(ctrl->admin_tagset);
	}
	nvme_tcp_free_admin_queue(ctrl);
}

static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
{
	int error;

	error = nvme_tcp_alloc_admin_queue(ctrl);
	if (error)
		return error;

	if (new) {
		ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
		if (IS_ERR(ctrl->admin_tagset)) {
			error = PTR_ERR(ctrl->admin_tagset);
			goto out_free_queue;
		}

		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->fabrics_q)) {
			error = PTR_ERR(ctrl->fabrics_q);
			goto out_free_tagset;
		}

		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
		if (IS_ERR(ctrl->admin_q)) {
			error = PTR_ERR(ctrl->admin_q);
			goto out_cleanup_fabrics_q;
		}
	}

	error = nvme_tcp_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = nvme_enable_ctrl(ctrl);
	if (error)
		goto out_stop_queue;

	blk_mq_unquiesce_queue(ctrl->admin_q);

	error = nvme_init_ctrl_finish(ctrl);
	if (error)
		goto out_quiesce_queue;

	return 0;

out_quiesce_queue:
	blk_mq_quiesce_queue(ctrl->admin_q);
	blk_sync_queue(ctrl->admin_q);
out_stop_queue:
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->admin_q);
out_cleanup_fabrics_q:
	if (new)
		blk_cleanup_queue(ctrl->fabrics_q);
1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929
out_free_tagset:
	if (new)
		blk_mq_free_tag_set(ctrl->admin_tagset);
out_free_queue:
	nvme_tcp_free_admin_queue(ctrl);
	return error;
}

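/*
 * Quiesce and stop the admin queue and cancel anything still in flight.
 * On removal the admin queue is unquiesced again before it is destroyed.
 */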
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
		bool remove)
{
	blk_mq_quiesce_queue(ctrl->admin_q);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	if (remove)
		blk_mq_unquiesce_queue(ctrl->admin_q);
	nvme_tcp_destroy_admin_queue(ctrl, remove);
}

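/*
 * Quiesce, freeze and stop all I/O queues and cancel in-flight I/O. The
 * admin queue is quiesced as well for the duration of the teardown; on
 * removal the I/O queues are unquiesced again before they are destroyed.
 */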
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
		bool remove)
{
	if (ctrl->queue_count <= 1)
		return;
	blk_mq_quiesce_queue(ctrl->admin_q);
	nvme_start_freeze(ctrl);
	nvme_stop_queues(ctrl);
	nvme_sync_io_queues(ctrl);
	nvme_tcp_stop_io_queues(ctrl);
	nvme_cancel_tagset(ctrl);
	if (remove)
		nvme_start_queues(ctrl);
	nvme_tcp_destroy_io_queues(ctrl, remove);
}

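/*
 * Called after a failed (re)connect attempt while in CONNECTING state: if
 * nvmf_should_reconnect() allows it, schedule another attempt after
 * reconnect_delay seconds, otherwise delete the controller.
 */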
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->state != NVME_CTRL_CONNECTING) {
		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
			ctrl->state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(ctrl)) {
		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
			ctrl->opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
				ctrl->opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->device, "Removing controller...\n");
		nvme_delete_ctrl(ctrl);
	}
}

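/*
 * Bring the controller up (initial connect and reconnect): configure the
 * admin queue, reject controllers that report a non-zero ICDOFF or no SGL
 * support, clamp the queue size to the controller limits, configure the
 * I/O queues and move the controller to LIVE.
 */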
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = nvme_tcp_configure_admin_queue(ctrl, new);
	if (ret)
		return ret;

	if (ctrl->icdoff) {
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "icdoff is not supported!\n");
		goto destroy_admin;
	}

	if (!nvme_ctrl_sgl_supported(ctrl)) {
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
		goto destroy_admin;
	}

	if (opts->queue_size > ctrl->sqsize + 1)
		dev_warn(ctrl->device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->sqsize + 1);

	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
		dev_warn(ctrl->device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->sqsize + 1, ctrl->maxcmd);
		ctrl->sqsize = ctrl->maxcmd - 1;
	}

	if (ctrl->queue_count > 1) {
		ret = nvme_tcp_configure_io_queues(ctrl, new);
		if (ret)
			goto destroy_admin;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
		/*
		 * state change failure is ok if we started ctrl delete,
		 * unless we are in the middle of creating a new controller,
		 * where it would race with the teardown flow.
		 */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		WARN_ON_ONCE(new);
		ret = -EINVAL;
		goto destroy_io;
	}

	nvme_start_ctrl(ctrl);
	return 0;

destroy_io:
	if (ctrl->queue_count > 1) {
		nvme_stop_queues(ctrl);
		nvme_sync_io_queues(ctrl);
		nvme_tcp_stop_io_queues(ctrl);
		nvme_cancel_tagset(ctrl);
		nvme_tcp_destroy_io_queues(ctrl, new);
	}
destroy_admin:
	blk_mq_quiesce_queue(ctrl->admin_q);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	nvme_tcp_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
			struct nvme_tcp_ctrl, connect_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	++ctrl->nr_reconnects;

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto requeue;

	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
			ctrl->nr_reconnects);

	ctrl->nr_reconnects = 0;

	return;

requeue:
	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
			ctrl->nr_reconnects);
	nvme_tcp_reconnect_or_remove(ctrl);
}

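/*
 * Error recovery: tear down the I/O and admin queues, fail fast any pending
 * requests, and move the controller to CONNECTING so a reconnect can be
 * scheduled.
 */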
static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
				struct nvme_tcp_ctrl, err_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	nvme_stop_keep_alive(ctrl);
	nvme_tcp_teardown_io_queues(ctrl, false);
	/* unquiesce to fail fast pending requests */
	nvme_start_queues(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, false);
	blk_mq_unquiesce_queue(ctrl->admin_q);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	nvme_tcp_reconnect_or_remove(ctrl);
}

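/*
 * Common teardown path for delete and reset: cancel pending error recovery
 * and reconnect work, tear down the I/O queues, then shut down (or just
 * disable) the controller and tear down the admin queue.
 */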
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);

	nvme_tcp_teardown_io_queues(ctrl, shutdown);
	blk_mq_quiesce_queue(ctrl->admin_q);
	if (shutdown)
		nvme_shutdown_ctrl(ctrl);
	else
		nvme_disable_ctrl(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);

	nvme_stop_ctrl(ctrl);
	nvme_tcp_teardown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->nr_reconnects;
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}

static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

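/* In-capsule data: the SGL address carries the controller's reported ICDOFF. */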
static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

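/*
 * Build the AER command PDU and queue it on the admin queue. The async
 * request carries no data, so only the command capsule (plus header digest
 * if enabled) goes on the wire.
 */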
static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req, true, true);
}

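/*
 * Complete a timed-out request after stopping its queue, so the completion
 * cannot race with one still arriving from the wire.
 */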
static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);
	}
}

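/*
 * Request timeout handler: outside of LIVE state the request is completed
 * immediately to avoid blocking teardown or setup; in LIVE state error
 * recovery is triggered and the block layer timer is reset so recovery can
 * complete the request.
 */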
static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;

	dev_warn(ctrl->device,
		"queue %d: timeout request %#x type %d\n",
		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

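/*
 * Set up the command's SGL descriptor: no data, in-capsule data for writes
 * that fit the inline data size, or a transport SGL for everything else.
 */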
static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}

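/*
 * Initialize the command PDU for a request: reset the send-side state,
 * compute the data length, set the header/data digest flags and PDU length
 * fields, and map the data into the command SGL.
 */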
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

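/* Make sure io_work runs for anything still sitting on the request list. */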
static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

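/*
 * blk-mq ->queue_rq handler: fail fast if the queue or controller is not
 * ready, set up the command PDU, and hand the request to the send path.
 */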
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}

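/*
 * Map blk-mq hardware contexts onto the TCP queues: either separate default
 * and read queue ranges or a single shared range, plus an optional dedicated
 * poll queue range at the end.
 */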
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}

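/*
 * blk-mq polling entry point: busy-poll the socket when possible, reap any
 * received completions, and return how many were processed.
 */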
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");