Commit a5768aa8, authored by Keith Busch, committed by Jens Axboe

NVMe: Automatic namespace rescan

Namespaces may be dynamically allocated and deleted or attached and
detached. This has the driver rescan the device for namespace changes
after each device reset or namespace change asynchronous event.
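For reference, the AER completion's result dword encodes the asynchronous event type in bits 2:0 and the event information in bits 15:8, which is why the handler in the diff below masks the result with 0xff07 and compares it against NVME_AER_NOTICE_NS_CHANGED (0x0002: type "notice", info "namespace attribute changed"); note the matched case has no break, so the event still falls through to the logging in the default arm. A minimal, self-contained sketch of the same decode (illustrative userspace C, not the driver code):

    /* Sketch: decoding an NVMe AER completion result dword.
     * Bit layout per the NVMe spec; names here are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define AER_TYPE_NOTICE     0x2  /* result bits 2:0, "Notice" event type */
    #define AER_INFO_NS_CHANGED 0x0  /* result bits 15:8, "Namespace Attribute Changed" */

    static int aer_is_ns_changed(uint32_t result)
    {
        uint32_t type = result & 0x07;         /* bits 2:0 */
        uint32_t info = (result >> 8) & 0xff;  /* bits 15:8 */

        /* Same test as the driver's (result & 0xff07) == NVME_AER_NOTICE_NS_CHANGED */
        return type == AER_TYPE_NOTICE && info == AER_INFO_NS_CHANGED;
    }

    int main(void)
    {
        printf("%d\n", aer_is_ns_changed(0x0002)); /* 1: namespace change notice */
        printf("%d\n", aer_is_ns_changed(0x0001)); /* 0: some other event type */
        return 0;
    }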

There could potentially be many detached namespaces that we don't want
polluting /dev/ with unusable block handles, so this will delete disks
if the namespace is not active as indicated by the response from identify
namespace. This also skips adding the disk if no capacity is provisioned
to the namespace in the first place.
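Taken together, the policy is: walk namespace IDs 1..NN as reported by Identify Controller, revalidate existing disks and allocate new ones, and drop any disk whose namespace is inactive (ncap == 0) or beyond NN. A toy, self-contained sketch of that policy (illustrative userspace C, with an array standing in for Identify Namespace data; the actual driver code is in the diff below):

    /* Sketch of the rescan policy described above; the arrays stand in for
     * controller state (ncap per namespace) and the driver's disk list. */
    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_NSID 8

    static unsigned ncap[MAX_NSID + 1] = { 0, 4096, 0, 1024 }; /* toy Identify data */
    static bool have_disk[MAX_NSID + 1];                       /* exposed in /dev */

    static void rescan(unsigned nn)
    {
        for (unsigned nsid = 1; nsid <= MAX_NSID; nsid++) {
            bool active = nsid <= nn && ncap[nsid] > 0;

            if (active && !have_disk[nsid]) {
                have_disk[nsid] = true;   /* attach: add the disk */
                printf("nvme0n%u added\n", nsid);
            } else if (!active && have_disk[nsid]) {
                have_disk[nsid] = false;  /* detached or gone: delete the disk */
                printf("nvme0n%u removed\n", nsid);
            }
        }
    }

    int main(void)
    {
        rescan(3);      /* initial scan: nsid 1 and 3 are provisioned */
        ncap[2] = 2048; /* namespace 2 gets attached... */
        ncap[3] = 0;    /* ...and namespace 3 detached */
        rescan(3);      /* AER-triggered rescan picks up both changes */
        return 0;
    }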
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent: b281ebb8
@@ -29,6 +29,7 @@
 #include <linux/kdev_t.h>
 #include <linux/kthread.h>
 #include <linux/kernel.h>
+#include <linux/list_sort.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -300,9 +301,16 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
 
 	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
 		++nvmeq->dev->event_limit;
-	if (status == NVME_SC_SUCCESS)
-		dev_warn(nvmeq->q_dmadev,
-			"async event result %08x\n", result);
+	if (status != NVME_SC_SUCCESS)
+		return;
+
+	switch (result & 0xff07) {
+	case NVME_AER_NOTICE_NS_CHANGED:
+		dev_info(nvmeq->q_dmadev, "rescanning\n");
+		schedule_work(&nvmeq->dev->scan_work);
+	default:
+		dev_warn(nvmeq->q_dmadev, "async event result %08x\n", result);
+	}
 }
 
 static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
@@ -1923,8 +1931,13 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	unsigned short bs;
 
 	if (nvme_identify_ns(dev, ns->ns_id, &id)) {
-		dev_warn(dev->dev, "%s: Identify failure\n", __func__);
-		return 0;
+		dev_warn(dev->dev, "%s: Identify failure nvme%dn%d\n", __func__,
+			dev->instance, ns->ns_id);
+		return -ENODEV;
+	}
+	if (id->ncap == 0) {
+		kfree(id);
+		return -ENODEV;
 	}
 
 	old_ms = ns->ms;
@@ -1958,7 +1971,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 								!ns->ext)
 		nvme_init_integrity(ns);
-	if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk)))
+	if (ns->ms && !blk_get_integrity(disk))
 		set_capacity(disk, 0);
 	else
 		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -2073,11 +2086,16 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 	 * requires it.
 	 */
 	set_capacity(disk, 0);
-	nvme_revalidate_disk(ns->disk);
+	if (nvme_revalidate_disk(ns->disk))
+		goto out_free_disk;
+
 	add_disk(ns->disk);
 	if (ns->ms)
 		revalidate_disk(ns->disk);
 	return;
+ out_free_disk:
+	kfree(disk);
+	list_del(&ns->list);
 out_free_queue:
 	blk_cleanup_queue(ns->queue);
 out_free_ns:
@@ -2194,6 +2212,99 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	return result;
 }
 
+static void nvme_free_namespace(struct nvme_ns *ns)
+{
+	list_del(&ns->list);
+
+	spin_lock(&dev_list_lock);
+	ns->disk->private_data = NULL;
+	spin_unlock(&dev_list_lock);
+
+	put_disk(ns->disk);
+	kfree(ns);
+}
+
+static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
+	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
+
+	return nsa->ns_id - nsb->ns_id;
+}
+
+static struct nvme_ns *nvme_find_ns(struct nvme_dev *dev, unsigned nsid)
+{
+	struct nvme_ns *ns;
+
+	list_for_each_entry(ns, &dev->namespaces, list) {
+		if (ns->ns_id == nsid)
+			return ns;
+		if (ns->ns_id > nsid)
+			break;
+	}
+	return NULL;
+}
+
+static inline bool nvme_io_incapable(struct nvme_dev *dev)
+{
+	return (!dev->bar || readl(&dev->bar->csts) & NVME_CSTS_CFS ||
+							dev->online_queues < 2);
+}
+
+static void nvme_ns_remove(struct nvme_ns *ns)
+{
+	bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
+
+	if (kill)
+		blk_set_queue_dying(ns->queue);
+	if (ns->disk->flags & GENHD_FL_UP) {
+		if (blk_get_integrity(ns->disk))
+			blk_integrity_unregister(ns->disk);
+		del_gendisk(ns->disk);
+	}
+	if (kill || !blk_queue_dying(ns->queue)) {
+		blk_mq_abort_requeue_list(ns->queue);
+		blk_cleanup_queue(ns->queue);
+	}
+}
+
+static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
+{
+	struct nvme_ns *ns, *next;
+	unsigned i;
+
+	for (i = 1; i <= nn; i++) {
+		ns = nvme_find_ns(dev, i);
+		if (ns) {
+			if (revalidate_disk(ns->disk)) {
+				nvme_ns_remove(ns);
+				nvme_free_namespace(ns);
+			}
+		} else
+			nvme_alloc_ns(dev, i);
+	}
+	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+		if (ns->ns_id > nn) {
+			nvme_ns_remove(ns);
+			nvme_free_namespace(ns);
+		}
+	}
+	list_sort(NULL, &dev->namespaces, ns_cmp);
+}
+
+static void nvme_dev_scan(struct work_struct *work)
+{
+	struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
+	struct nvme_id_ctrl *ctrl;
+
+	if (!dev->tagset.tags)
+		return;
+	if (nvme_identify_ctrl(dev, &ctrl))
+		return;
+	nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
+	kfree(ctrl);
+}
+
 /*
  * Return: error value if an error occurred setting up the queues or calling
  * Identify Device.  0 if these succeeded, even if adding some of the
@@ -2204,7 +2315,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int res;
-	unsigned nn, i;
+	unsigned nn;
 	struct nvme_id_ctrl *ctrl;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
@@ -2250,9 +2361,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	if (blk_mq_alloc_tag_set(&dev->tagset))
 		return 0;
 
-	for (i = 1; i <= nn; i++)
-		nvme_alloc_ns(dev, i);
-
+	schedule_work(&dev->scan_work);
 	return 0;
 }
 
@@ -2552,17 +2661,8 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 {
 	struct nvme_ns *ns;
 
-	list_for_each_entry(ns, &dev->namespaces, list) {
-		if (ns->disk->flags & GENHD_FL_UP) {
-			if (blk_get_integrity(ns->disk))
-				blk_integrity_unregister(ns->disk);
-			del_gendisk(ns->disk);
-		}
-		if (!blk_queue_dying(ns->queue)) {
-			blk_mq_abort_requeue_list(ns->queue);
-			blk_cleanup_queue(ns->queue);
-		}
-	}
+	list_for_each_entry(ns, &dev->namespaces, list)
+		nvme_ns_remove(ns);
 }
 
 static int nvme_setup_prp_pools(struct nvme_dev *dev)
@@ -2621,16 +2721,8 @@ static void nvme_free_namespaces(struct nvme_dev *dev)
 {
 	struct nvme_ns *ns, *next;
 
-	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-		list_del(&ns->list);
-
-		spin_lock(&dev_list_lock);
-		ns->disk->private_data = NULL;
-		spin_unlock(&dev_list_lock);
-
-		put_disk(ns->disk);
-		kfree(ns);
-	}
+	list_for_each_entry_safe(ns, next, &dev->namespaces, list)
+		nvme_free_namespace(ns);
 }
 
 static void nvme_free_dev(struct kref *kref)
@@ -2814,6 +2906,7 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		spin_unlock(&dev_list_lock);
 	} else {
 		nvme_unfreeze_queues(dev);
+		schedule_work(&dev->scan_work);
 		nvme_set_irq_hints(dev);
 	}
 	return 0;
@@ -2935,6 +3028,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto put_dev;
 
 	INIT_LIST_HEAD(&dev->node);
+	INIT_WORK(&dev->scan_work, nvme_dev_scan);
 	INIT_WORK(&dev->probe_work, nvme_async_probe);
 	schedule_work(&dev->probe_work);
 	return 0;
@@ -3007,6 +3101,7 @@ static void nvme_remove(struct pci_dev *pdev)
 	pci_set_drvdata(pdev, NULL);
 	flush_work(&dev->probe_work);
 	flush_work(&dev->reset_work);
+	flush_work(&dev->scan_work);
 	device_remove_file(dev->device, &dev_attr_reset_controller);
 	nvme_dev_shutdown(dev);
 	nvme_dev_remove(dev);
...
@@ -92,6 +92,7 @@ struct nvme_dev {
 	work_func_t reset_workfn;
 	struct work_struct reset_work;
 	struct work_struct probe_work;
+	struct work_struct scan_work;
 	char name[12];
 	char serial[20];
 	char model[40];
...
@@ -179,6 +179,10 @@ enum {
 	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
 };
 
+enum {
+	NVME_AER_NOTICE_NS_CHANGED	= 0x0002,
+};
+
 struct nvme_lba_range_type {
 	__u8	type;
 	__u8	attributes;
...