Commit 65fb1b0d authored by Jens Axboe

Merge tag 'nvme-5.12-2021-02-11' of git://git.infradead.org/nvme into for-5.12/drivers

Pull NVMe updates from Christoph:

"nvme updates for 5.12:

 - fix multipath handling of ->queue_rq errors (Chao Leng)
 - nvmet cleanups (Chaitanya Kulkarni)
 - add a quirk for buggy Amazon controller (Filippo Sironi)
 - avoid devm allocations in nvme-hwmon that don't interact well with
   fabrics (Hannes Reinecke)
 - sysfs cleanups (Jiapeng Chong)
 - fix nr_zones for multipath (Keith Busch)
 - nvme-tcp crash fix for no-data commands (Sagi Grimberg)
 - nvmet-tcp fixes (Sagi Grimberg)
 - add a missing __rcu annotation (me)"

* tag 'nvme-5.12-2021-02-11' of git://git.infradead.org/nvme: (22 commits)
  nvme-tcp: fix crash triggered with a dataless request submission
  nvme: add 48-bit DMA address quirk for Amazon NVMe controllers
  nvme-hwmon: rework to avoid devm allocation
  nvmet: remove else at the end of the function
  nvmet: add nvmet_req_subsys() helper
  nvmet: use min of device_path and disk len
  nvmet: use invalid cmd opcode helper
  nvmet: use invalid cmd opcode helper
  nvmet: add helper to report invalid opcode
  nvmet: remove extra variable in id-ns handler
  nvmet: make nvmet_find_namespace() req based
  nvmet: return uniform error for invalid ns
  nvmet: set status to 0 in case for invalid nsid
  nvmet-fc: add a missing __rcu annotation to nvmet_fc_tgt_assoc.queues
  nvme-multipath: set nr_zones for zoned namespaces
  nvmet-tcp: fix potential race of tcp socket closing accept_work
  nvmet-tcp: fix receive data digest calculation for multiple h2cdata PDUs
  nvme-rdma: handle nvme_rdma_post_send failures better
  nvme-fabrics: avoid double completions in nvmf_fail_nonready_command
  nvme: introduce a nvme_host_path_error helper
  ...
drivers/nvme/host/core.c

@@ -355,6 +355,21 @@ void nvme_complete_rq(struct request *req)
 }
 EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
+/*
+ * Called to unwind from ->queue_rq on a failed command submission so that the
+ * multipathing code gets called to potentially failover to another path.
+ * The caller needs to unwind all transport specific resource allocations and
+ * must propagate the return value.
+ */
+blk_status_t nvme_host_path_error(struct request *req)
+{
+        nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+        blk_mq_set_request_complete(req);
+        nvme_complete_rq(req);
+        return BLK_STS_OK;
+}
+EXPORT_SYMBOL_GPL(nvme_host_path_error);
+
 bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 {
         dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,

@@ -2848,7 +2863,7 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
         struct nvme_subsystem *subsys =
                 container_of(dev, struct nvme_subsystem, dev);
 
-        return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
+        return sysfs_emit(buf, "%s\n", subsys->subnqn);
 }
 static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

@@ -3541,7 +3556,7 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev,
 {
         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
-        return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
+        return sysfs_emit(buf, "%s\n", ctrl->ops->name);
 }
 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

@@ -3575,7 +3590,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
 {
         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
-        return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
+        return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
 }
 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

@@ -3585,7 +3600,7 @@ static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
 {
         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
-        return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
+        return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
 }
 static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

@@ -3595,7 +3610,7 @@ static ssize_t nvme_sysfs_show_hostid(struct device *dev,
 {
         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
-        return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
+        return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
 }
 static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

@@ -4456,6 +4471,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
 {
+        nvme_hwmon_exit(ctrl);
         nvme_fault_inject_fini(&ctrl->fault_inject);
         dev_pm_qos_hide_latency_tolerance(ctrl->device);
         cdev_device_del(&ctrl->cdev, ctrl->device);
......
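The sysfs hunks above swap snprintf(buf, PAGE_SIZE, ...) for sysfs_emit(buf, ...): sysfs_emit() knows a show() buffer is always a full sysfs page, so callers no longer pass a size and cannot overrun it. As a rough illustration only (this attribute and field pairing is made up, not part of the series), a show() callback written against the helper looks like:

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

        /* no PAGE_SIZE argument: sysfs_emit() bounds the write itself */
        return sysfs_emit(buf, "%d\n", ctrl->instance);
}
static DEVICE_ATTR_RO(example);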
drivers/nvme/host/fabrics.c

@@ -552,11 +552,7 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
             !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
             !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
                 return BLK_STS_RESOURCE;
-
-        nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
-        blk_mq_start_request(rq);
-        nvme_complete_rq(rq);
-        return BLK_STS_OK;
+        return nvme_host_path_error(rq);
 }
 EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
......
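nvmf_fail_nonready_command() is what the fabrics transports call from ->queue_rq when the controller is not live; delegating to nvme_host_path_error() marks the request complete before nvme_complete_rq() runs, which closes the double-completion window mentioned in the changelog. A hedged sketch of a typical call site follows; the queue type, flag, and ctrl pointer layout are hypothetical, and the readiness check is assumed to be the fabrics-era nvmf_check_ready():

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
{
        struct example_queue *queue = hctx->driver_data;        /* hypothetical */
        struct request *rq = bd->rq;
        bool queue_ready = test_bit(EXAMPLE_Q_LIVE, &queue->flags);

        /* queue->ctrl is assumed to be a struct nvme_ctrl * here */
        if (!nvmf_check_ready(queue->ctrl, rq, queue_ready))
                return nvmf_fail_nonready_command(queue->ctrl, rq);

        /* ... map data and post the command to the transport ... */
        return BLK_STS_OK;
}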
drivers/nvme/host/hwmon.c

@@ -223,12 +223,12 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
 
 int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
-        struct device *dev = ctrl->dev;
+        struct device *dev = ctrl->device;
         struct nvme_hwmon_data *data;
         struct device *hwmon;
         int err;
 
-        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
         if (!data)
                 return 0;

@@ -237,19 +237,30 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 
         err = nvme_hwmon_get_smart_log(data);
         if (err) {
-                dev_warn(ctrl->device,
-                         "Failed to read smart log (error %d)\n", err);
-                devm_kfree(dev, data);
+                dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+                kfree(data);
                 return err;
         }
 
-        hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
-                                                     &nvme_hwmon_chip_info,
-                                                     NULL);
+        hwmon = hwmon_device_register_with_info(dev, "nvme",
+                                                data, &nvme_hwmon_chip_info,
                                                 NULL);
         if (IS_ERR(hwmon)) {
                 dev_warn(dev, "Failed to instantiate hwmon device\n");
-                devm_kfree(dev, data);
+                kfree(data);
         }
+        ctrl->hwmon_device = hwmon;
         return 0;
 }
+
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+        if (ctrl->hwmon_device) {
+                struct nvme_hwmon_data *data =
+                        dev_get_drvdata(ctrl->hwmon_device);
+
+                hwmon_device_unregister(ctrl->hwmon_device);
+                ctrl->hwmon_device = NULL;
+                kfree(data);
+        }
+}
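Dropping devm here matters because, on fabrics setups, the device the devm allocations were tied to can outlive many controller create/teardown cycles, so the hwmon device and its data would otherwise linger. The lifetime rule the new code follows is simply: free the drvdata only after hwmon_device_unregister(). A minimal sketch of that pairing, with hypothetical names rather than the driver's own:

struct example_hwmon_data {
        long last_reading;              /* placeholder sensor state */
};

static struct device *example_hwmon_register(struct device *dev,
                                             const struct hwmon_chip_info *info)
{
        struct example_hwmon_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
        struct device *hwmon;

        if (!data)
                return ERR_PTR(-ENOMEM);

        hwmon = hwmon_device_register_with_info(dev, "example", data, info, NULL);
        if (IS_ERR(hwmon))
                kfree(data);            /* registration failed, nothing owns data */
        return hwmon;
}

static void example_hwmon_unregister(struct device *hwmon)
{
        struct example_hwmon_data *data = dev_get_drvdata(hwmon);

        hwmon_device_unregister(hwmon);
        kfree(data);                    /* only safe after unregister */
}

dev_get_drvdata() works here because the hwmon core stores the drvdata pointer on the registered device, which is also what nvme_hwmon_exit() above relies on.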
drivers/nvme/host/multipath.c

@@ -677,6 +677,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
         if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
                 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
                                    ns->head->disk->queue);
+#ifdef CONFIG_BLK_DEV_ZONED
+        if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
+                ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
+#endif
 }
 
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
......
drivers/nvme/host/nvme.h

@@ -144,6 +144,12 @@ enum nvme_quirks {
          * NVMe 1.3 compliance.
          */
         NVME_QUIRK_NO_NS_DESC_LIST              = (1 << 15),
+
+        /*
+         * The controller does not properly handle DMA addresses over
+         * 48 bits.
+         */
+        NVME_QUIRK_DMA_ADDRESS_BITS_48          = (1 << 16),
 };
 
 /*

@@ -246,6 +252,9 @@ struct nvme_ctrl {
         struct rw_semaphore namespaces_rwsem;
         struct device ctrl_device;
         struct device *device;  /* char device */
+#ifdef CONFIG_NVME_HWMON
+        struct device *hwmon_device;
+#endif
         struct cdev cdev;
         struct work_struct reset_work;
         struct work_struct delete_work;

@@ -575,6 +584,7 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 }
 
 void nvme_complete_rq(struct request *req);
+blk_status_t nvme_host_path_error(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);

@@ -811,11 +821,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 
 #ifdef CONFIG_NVME_HWMON
 int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
 #else
 static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
         return 0;
 }
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
 #endif
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
......
drivers/nvme/host/pci.c

@@ -2362,13 +2362,16 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 {
         int result = -ENOMEM;
         struct pci_dev *pdev = to_pci_dev(dev->dev);
+        int dma_address_bits = 64;
 
         if (pci_enable_device_mem(pdev))
                 return result;
 
         pci_set_master(pdev);
 
-        if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
+        if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+                dma_address_bits = 48;
+        if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
                 goto disable;
 
         if (readl(dev->bar + NVME_REG_CSTS) == -1) {

@@ -3257,6 +3260,22 @@ static const struct pci_device_id nvme_id_table[] = {
                 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
         { PCI_DEVICE(0x15b7, 0x2001),   /* Sandisk Skyhawk */
                 .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+        { PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
+                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD */
+                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
+                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
+                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
+                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
+                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
                 .driver_data = NVME_QUIRK_SINGLE_VECTOR },
         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
......
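To make the runtime effect of the new quirk concrete: the .driver_data bits from the ID table land in ctrl.quirks, and nvme_pci_enable() now derives the DMA mask width from them, so the affected Amazon controllers are never handed bus addresses above 48 bits. A stand-alone sketch of that decision, using a hypothetical helper around the same flag:

#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct device *dev, unsigned long quirks)
{
        unsigned int bits = (quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) ? 48 : 64;

        /* limits both streaming and coherent DMA to addresses below 2^bits */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(bits));
}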
drivers/nvme/host/rdma.c

@@ -2098,7 +2098,9 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 err_unmap:
         nvme_rdma_unmap_data(queue, rq);
 err:
-        if (err == -ENOMEM || err == -EAGAIN)
+        if (err == -EIO)
+                ret = nvme_host_path_error(rq);
+        else if (err == -ENOMEM || err == -EAGAIN)
                 ret = BLK_STS_RESOURCE;
         else
                 ret = BLK_STS_IOERR;
......
drivers/nvme/host/tcp.c

@@ -2271,7 +2271,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
         req->data_len = blk_rq_nr_phys_segments(rq) ?
                                 blk_rq_payload_bytes(rq) : 0;
         req->curr_bio = rq->bio;
-        if (req->curr_bio)
+        if (req->curr_bio && req->data_len)
                 nvme_tcp_init_iter(req, rq_data_dir(rq));
 
         if (rq_data_dir(rq) == WRITE &&
......
drivers/nvme/target/admin-cmd.c

@@ -75,15 +75,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                 struct nvme_smart_log *slog)
 {
         u64 host_reads, host_writes, data_units_read, data_units_written;
+        u16 status;
 
-        req->ns = nvmet_find_namespace(req->sq->ctrl,
-                        req->cmd->get_log_page.nsid);
-        if (!req->ns) {
-                pr_err("Could not find namespace id : %d\n",
-                                le32_to_cpu(req->cmd->get_log_page.nsid));
-                req->error_loc = offsetof(struct nvme_rw_command, nsid);
-                return NVME_SC_INVALID_NS;
-        }
+        status = nvmet_req_find_ns(req);
+        if (status)
+                return status;
 
         /* we don't have the right data for file backed ns */
         if (!req->ns->bdev)

@@ -466,9 +462,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 
 static void nvmet_execute_identify_ns(struct nvmet_req *req)
 {
-        struct nvmet_ctrl *ctrl = req->sq->ctrl;
         struct nvme_id_ns *id;
-        u16 status = 0;
+        u16 status;
 
         if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                 req->error_loc = offsetof(struct nvme_identify, nsid);

@@ -483,9 +478,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
         }
 
         /* return an all zeroed buffer if we can't find an active namespace */
-        req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
-        if (!req->ns) {
-                status = NVME_SC_INVALID_NS;
+        status = nvmet_req_find_ns(req);
+        if (status) {
+                status = 0;
                 goto done;
         }

@@ -527,7 +522,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
         id->lbaf[0].ds = req->ns->blksize_shift;
 
-        if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
+        if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
                 id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
                           NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
                           NVME_NS_DPC_PI_TYPE3;

@@ -604,15 +599,12 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
 
 static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 {
-        u16 status = 0;
         off_t off = 0;
+        u16 status;
 
-        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
-        if (!req->ns) {
-                req->error_loc = offsetof(struct nvme_identify, nsid);
-                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+        status = nvmet_req_find_ns(req);
+        if (status)
                 goto out;
-        }
 
         if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
                 status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,

@@ -691,14 +683,12 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
 {
         u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
-        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
-        u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+        u16 status;
 
-        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
-        if (unlikely(!req->ns)) {
-                req->error_loc = offsetof(struct nvme_common_command, nsid);
+        status = nvmet_req_find_ns(req);
+        if (status)
                 return status;
-        }
 
         mutex_lock(&subsys->lock);
         switch (write_protect) {

@@ -752,7 +742,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
 
 void nvmet_execute_set_features(struct nvmet_req *req)
 {
-        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
         u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
         u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
         u16 status = 0;

@@ -796,14 +786,13 @@ void nvmet_execute_set_features(struct nvmet_req *req)
 
 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
 {
-        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
         u32 result;
 
-        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
-        if (!req->ns) {
-                req->error_loc = offsetof(struct nvme_common_command, nsid);
-                return NVME_SC_INVALID_NS | NVME_SC_DNR;
-        }
+        result = nvmet_req_find_ns(req);
+        if (result)
+                return result;
+
         mutex_lock(&subsys->lock);
         if (req->ns->readonly == true)
                 result = NVME_NS_WRITE_PROTECT;

@@ -827,7 +816,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)
 
 void nvmet_execute_get_features(struct nvmet_req *req)
 {
-        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+        struct nvmet_subsys *subsys = nvmet_req_subsys(req);
         u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
         u16 status = 0;

@@ -934,7 +923,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 
         if (nvme_is_fabrics(cmd))
                 return nvmet_parse_fabrics_cmd(req);
-        if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+        if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
                 return nvmet_parse_discovery_cmd(req);
 
         ret = nvmet_check_ctrl_status(req, cmd);
......
drivers/nvme/target/core.c

@@ -82,6 +82,15 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
         return status;
 }
 
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
+{
+        pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
+                 req->sq->qid);
+
+        req->error_loc = offsetof(struct nvme_common_command, opcode);
+        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
+
 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
                 const char *subsysnqn);

@@ -417,15 +426,18 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
         cancel_delayed_work_sync(&ctrl->ka_work);
 }
 
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
+u16 nvmet_req_find_ns(struct nvmet_req *req)
 {
-        struct nvmet_ns *ns;
+        u32 nsid = le32_to_cpu(req->cmd->common.nsid);
 
-        ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
-        if (ns)
-                percpu_ref_get(&ns->ref);
+        req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+        if (unlikely(!req->ns)) {
+                req->error_loc = offsetof(struct nvme_common_command, nsid);
+                return NVME_SC_INVALID_NS | NVME_SC_DNR;
+        }
 
-        return ns;
+        percpu_ref_get(&req->ns->ref);
+        return NVME_SC_SUCCESS;
 }
 
 static void nvmet_destroy_namespace(struct percpu_ref *ref)

@@ -862,11 +874,10 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
         if (nvmet_req_passthru_ctrl(req))
                 return nvmet_parse_passthru_io_cmd(req);
 
-        req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
-        if (unlikely(!req->ns)) {
-                req->error_loc = offsetof(struct nvme_common_command, nsid);
-                return NVME_SC_INVALID_NS | NVME_SC_DNR;
-        }
+        ret = nvmet_req_find_ns(req);
+        if (unlikely(ret))
+                return ret;
+
         ret = nvmet_check_ana_state(req->port, req->ns);
         if (unlikely(ret)) {
                 req->error_loc = offsetof(struct nvme_common_command, nsid);

@@ -880,8 +891,8 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 
         if (req->ns->file)
                 return nvmet_file_parse_io_cmd(req);
-        else
-                return nvmet_bdev_parse_io_cmd(req);
+
+        return nvmet_bdev_parse_io_cmd(req);
 }
 
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
......
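The conversion pattern the target patches follow is visible above: instead of looking up the namespace and open-coding the error handling, a handler calls nvmet_req_find_ns(), which fills req->ns, sets req->error_loc and returns an NVMe status word; the namespace reference it takes is dropped when the request completes. A hypothetical handler written against the new helper:

static u16 nvmet_example_handler(struct nvmet_req *req)
{
        u16 status;

        status = nvmet_req_find_ns(req);
        if (status)
                return status;          /* req->error_loc is already set */

        /* ... operate on req->ns ... */
        return NVME_SC_SUCCESS;
}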
drivers/nvme/target/fc.c

@@ -165,7 +165,7 @@ struct nvmet_fc_tgt_assoc {
         struct nvmet_fc_hostport        *hostport;
         struct nvmet_fc_ls_iod          *rcv_disconn;
         struct list_head                a_list;
-        struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES + 1];
+        struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
         struct kref                     ref;
         struct work_struct              del_work;
         struct rcu_head                 rcu;
......
drivers/nvme/target/io-cmd-bdev.c

@@ -449,9 +449,6 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
                 req->execute = nvmet_bdev_execute_write_zeroes;
                 return 0;
         default:
-                pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
-                       req->sq->qid);
-                req->error_loc = offsetof(struct nvme_common_command, opcode);
-                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+                return nvmet_report_invalid_opcode(req);
         }
 }

drivers/nvme/target/io-cmd-file.c

@@ -400,9 +400,6 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
                 req->execute = nvmet_file_execute_write_zeroes;
                 return 0;
         default:
-                pr_err("unhandled cmd for file ns %d on qid %d\n",
-                       cmd->common.opcode, req->sq->qid);
-                req->error_loc = offsetof(struct nvme_common_command, opcode);
-                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+                return nvmet_report_invalid_opcode(req);
         }
 }
drivers/nvme/target/nvmet.h

@@ -443,7 +443,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
 
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+u16 nvmet_req_find_ns(struct nvmet_req *req);
 void nvmet_put_namespace(struct nvmet_ns *ns);
 int nvmet_ns_enable(struct nvmet_ns *ns);
 void nvmet_ns_disable(struct nvmet_ns *ns);

@@ -551,6 +551,11 @@ static inline u32 nvmet_dsm_len(struct nvmet_req *req)
                 sizeof(struct nvme_dsm_range);
 }
 
+static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
+{
+        return req->sq->ctrl->subsys;
+}
+
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
 void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
 int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);

@@ -585,10 +590,11 @@ static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
 static inline struct nvme_ctrl *
 nvmet_req_passthru_ctrl(struct nvmet_req *req)
 {
-        return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
+        return nvmet_passthru_ctrl(nvmet_req_subsys(req));
 }
 
 u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
 
 /* Convert a 32-bit number to a 16-bit 0's based number */
 static inline __le16 to0based(u32 a)
......
drivers/nvme/target/passthru.c

@@ -239,9 +239,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
                 }
 
                 q = ns->queue;
-                timeout = req->sq->ctrl->subsys->io_timeout;
+                timeout = nvmet_req_subsys(req)->io_timeout;
         } else {
-                timeout = req->sq->ctrl->subsys->admin_timeout;
+                timeout = nvmet_req_subsys(req)->admin_timeout;
         }
 
         rq = nvme_alloc_request(q, req->cmd, 0);

@@ -494,7 +494,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
                 return nvmet_setup_passthru_command(req);
         default:
                 /* Reject commands not in the allowlist above */
-                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+                return nvmet_report_invalid_opcode(req);
         }
 }
......
drivers/nvme/target/tcp.c

@@ -378,7 +378,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
         return NVME_SC_INTERNAL;
 }
 
-static void nvmet_tcp_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
                 struct nvmet_tcp_cmd *cmd)
 {
         ahash_request_set_crypt(hash, cmd->req.sg,

@@ -386,6 +386,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
         crypto_ahash_digest(hash);
 }
 
+static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
+                struct nvmet_tcp_cmd *cmd)
+{
+        struct scatterlist sg;
+        struct kvec *iov;
+        int i;
+
+        crypto_ahash_init(hash);
+        for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
+                sg_init_one(&sg, iov->iov_base, iov->iov_len);
+                ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
+                crypto_ahash_update(hash);
+        }
+        ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
+        crypto_ahash_final(hash);
+}
+
 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
         struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;

@@ -410,7 +427,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
         if (queue->data_digest) {
                 pdu->hdr.flags |= NVME_TCP_F_DDGST;
-                nvmet_tcp_ddgst(queue->snd_hash, cmd);
+                nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
         }
 
         if (cmd->queue->hdr_digest) {

@@ -1059,7 +1076,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
 {
         struct nvmet_tcp_queue *queue = cmd->queue;
 
-        nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+        nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
         queue->offset = 0;
         queue->left = NVME_TCP_DIGEST_LENGTH;
         queue->rcv_state = NVMET_TCP_RECV_DDGST;

@@ -1080,14 +1097,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
                 cmd->rbytes_done += ret;
         }
 
+        if (queue->data_digest) {
+                nvmet_tcp_prep_recv_ddgst(cmd);
+                return 0;
+        }
         nvmet_tcp_unmap_pdu_iovec(cmd);
+
         if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
             cmd->rbytes_done == cmd->req.transfer_len) {
-                if (queue->data_digest) {
-                        nvmet_tcp_prep_recv_ddgst(cmd);
-                        return 0;
-                }
                 cmd->req.execute(&cmd->req);
         }

@@ -1467,17 +1484,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
         if (inet->rcv_tos > 0)
                 ip_sock_set_tos(sock->sk, inet->rcv_tos);
 
+        ret = 0;
         write_lock_bh(&sock->sk->sk_callback_lock);
-        sock->sk->sk_user_data = queue;
-        queue->data_ready = sock->sk->sk_data_ready;
-        sock->sk->sk_data_ready = nvmet_tcp_data_ready;
-        queue->state_change = sock->sk->sk_state_change;
-        sock->sk->sk_state_change = nvmet_tcp_state_change;
-        queue->write_space = sock->sk->sk_write_space;
-        sock->sk->sk_write_space = nvmet_tcp_write_space;
+        if (sock->sk->sk_state != TCP_ESTABLISHED) {
+                /*
+                 * If the socket is already closing, don't even start
+                 * consuming it
+                 */
+                ret = -ENOTCONN;
+        } else {
+                sock->sk->sk_user_data = queue;
+                queue->data_ready = sock->sk->sk_data_ready;
+                sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+                queue->state_change = sock->sk->sk_state_change;
+                sock->sk->sk_state_change = nvmet_tcp_state_change;
+                queue->write_space = sock->sk->sk_write_space;
+                sock->sk->sk_write_space = nvmet_tcp_write_space;
+                queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+        }
         write_unlock_bh(&sock->sk->sk_callback_lock);
 
-        return 0;
+        return ret;
 }

@@ -1525,8 +1552,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
         if (ret)
                 goto out_destroy_sq;
 
-        queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
-
         return 0;
 out_destroy_sq:
         mutex_lock(&nvmet_tcp_queue_mutex);
......
drivers/nvme/target/trace.h

@@ -48,10 +48,13 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
 
 static inline void __assign_req_name(char *name, struct nvmet_req *req)
 {
-        if (req->ns)
-                strncpy(name, req->ns->device_path, DISK_NAME_LEN);
-        else
+        if (!req->ns) {
                 memset(name, 0, DISK_NAME_LEN);
+                return;
+        }
+
+        strncpy(name, req->ns->device_path,
+                min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
 }
 #endif
......
include/linux/blk-mq.h

@@ -490,6 +490,18 @@ static inline int blk_mq_request_completed(struct request *rq)
         return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
 }
 
+/*
+ * Set the state to complete when completing a request from inside ->queue_rq.
+ * This is used by drivers that want to ensure special complete actions that
+ * need access to the request are called on failure, e.g. by nvme for
+ * multipathing.
+ */
+static inline void blk_mq_set_request_complete(struct request *rq)
+{
+        WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+}
+
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);
......
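blk_mq_set_request_complete() exists for exactly the pattern nvme_host_path_error() uses earlier in this series: when ->queue_rq fails before the request was ever started, the driver completes it by hand and must first move it to MQ_RQ_COMPLETE so the block layer never completes it a second time. A condensed sketch of such an error path in a hypothetical transport, mirroring the nvme-rdma change above:

static blk_status_t example_queue_rq_error(struct request *rq, int err)
{
        /* transport-specific unwinding (unmapping, freeing SGLs) happens first */
        if (err == -EIO) {
                /* host path error: complete here so multipath can fail over */
                nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
                blk_mq_set_request_complete(rq);
                nvme_complete_rq(rq);
                return BLK_STS_OK;
        }
        if (err == -ENOMEM || err == -EAGAIN)
                return BLK_STS_RESOURCE;        /* block layer retries later */
        return BLK_STS_IOERR;
}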