/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  NVME_INLINE_SG_CNT  0
#define  NVME_INLINE_METADATA_SG_CNT  0
#else
#define  NVME_INLINE_SG_CNT  2
#define  NVME_INLINE_METADATA_SG_CNT  1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}

/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

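/*
 * Illustrative sketch (not part of this header): a device opts into a
 * quirk through the driver_data of its PCI id table entry, e.g.
 *
 *	{ PCI_DEVICE(0x1c58, 0x0003),
 *		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 */
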
/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED	0
#define NVME_CTRL_ADMIN_Q_STOPPED	1
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	enum nvme_subsys_type	subtype;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;

	struct cdev		cdev;
	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4
#define NVME_NS_STOPPED		5

	struct cdev		cdev;
	struct device		cdev_device;

	struct nvme_fault_inject fault_inject;

};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)			(gen & 0xf)
#define nvme_cid_install_genctr(gen)		(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)		((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)			(cid & 0xfff)

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
                u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}
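
/*
 * Worked example (illustrative): genctr 0x5 and tag 0x02a encode as
 * nvme_cid() == (0x5 << 12) | 0x02a == 0x502a.  On completion,
 * nvme_genctr_from_cid(0x502a) == 0x5 and nvme_tag_from_cid(0x502a) == 0x02a,
 * which is how nvme_find_rq() rejects completions carrying a stale
 * generation.
 */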

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
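	/* writing the ASCII string "NVMe" (0x4E564D65) to NSSR triggers the reset */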
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}

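/*
 * Worked example (illustrative): with 4096-byte logical blocks,
 * ns->lba_shift == 12, so nvme_sect_to_lba() maps 512B sector 24 to
 * LBA 24 >> (12 - 9) == 3, and nvme_lba_to_sect() maps LBA 3 back to
 * sector 3 << 3 == 24.
 */
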
/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}

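/*
 * Worked example (illustrative): a 4096-byte buffer is 4096 / 4 == 1024
 * dwords, which nvme_bytes_to_numd() encodes as the 0-based value 1023.
 */
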
static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

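/*
 * Usage sketch (illustrative; nvme_foo_complete_rq() is a hypothetical
 * transport-specific helper): a driver's CQE handler typically does
 *
 *	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 *		nvme_foo_complete_rq(req);
 *
 * taking the indirect completion path only when blk-mq did not complete
 * the request in place.
 */
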
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

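/*
 * Admin tags at or above NVME_AQ_BLK_MQ_DEPTH are reserved for AEN
 * commands, which never correspond to a blk-mq request.
 */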
static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}

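/*
 * Usage sketch (illustrative; nvme_foo_complete_batch() is a hypothetical
 * per-request teardown hook): a polled completion loop can gather finished
 * requests into a struct io_comp_batch and end them all at once with
 *
 *	nvme_complete_batch(iob, nvme_foo_complete_batch);
 */
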
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return true;
	return __nvme_check_ready(ctrl, rq, queue_live);
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;

struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
		int *flags)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
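	/* SGLS bits 1:0 nonzero: the controller supports SGLs for data transfer */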
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
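	/* CC.CSS == CSI means "all supported I/O command sets" are enabled */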
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#endif /* _NVME_H */