Commit 9d713c2b authored by Keith Busch, committed by Matthew Wilcox

NVMe: Handle ioremap failure

Decrement the number of queues required for doorbell remapping until
the memory is successfully mapped for that size.

Additional checks are done so that we don't call free_irq if it has
already been freed.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Parent cd638946
@@ -1739,10 +1739,15 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 	return min(result & 0xffff, result >> 16) + 1;
 }
 
+static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+{
+	return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct pci_dev *pdev = dev->pci_dev;
-	int result, cpu, i, vecs, nr_io_queues, db_bar_size, q_depth;
+	int result, cpu, i, vecs, nr_io_queues, size, q_depth;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1751,17 +1756,24 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result < nr_io_queues)
 		nr_io_queues = result;
 
-	/* Deregister the admin queue's interrupt */
-	free_irq(dev->entry[0].vector, dev->queues[0]);
-
-	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
-	if (db_bar_size > 8192) {
+	size = db_bar_size(dev, nr_io_queues);
+	if (size > 8192) {
 		iounmap(dev->bar);
-		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
+		do {
+			dev->bar = ioremap(pci_resource_start(pdev, 0), size);
+			if (dev->bar)
+				break;
+			if (!--nr_io_queues)
+				return -ENOMEM;
+			size = db_bar_size(dev, nr_io_queues);
+		} while (1);
 		dev->dbs = ((void __iomem *)dev->bar) + 4096;
 		dev->queues[0]->q_db = dev->dbs;
 	}
 
+	/* Deregister the admin queue's interrupt */
+	free_irq(dev->entry[0].vector, dev->queues[0]);
+
 	vecs = nr_io_queues;
 	for (i = 0; i < vecs; i++)
 		dev->entry[i].entry = i;
@@ -1799,8 +1811,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	nr_io_queues = vecs;
 
 	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
-	if (result)
+	if (result) {
+		dev->queues[0]->q_suspended = 1;
 		goto free_queues;
+	}
 
 	/* Free previously allocated queues that are no longer usable */
 	spin_lock(&dev_list_lock);
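For illustration, here is a minimal user-space sketch of the shrink-and-retry pattern the patch introduces. try_map() and map_doorbells() are hypothetical stand-ins for ioremap() and the driver logic, not part of the driver. The size arithmetic mirrors db_bar_size(): the doorbell registers begin at BAR offset 4096, and each queue needs a submission-tail and a completion-head doorbell of 4 << db_stride bytes each, i.e. 1 << (db_stride + 3) bytes per queue, plus one extra queue for the admin queue.

/*
 * Minimal user-space sketch of the shrink-and-retry pattern above.
 * try_map() stands in for ioremap() and can fail; every name here is
 * illustrative, not the driver's.
 */
#include <stdio.h>
#include <stdlib.h>

/* Mirrors db_bar_size(): 4096 bytes of registers, then two doorbells
 * of 4 << db_stride bytes per queue, plus one for the admin queue. */
static size_t db_bar_size(unsigned db_stride, unsigned nr_io_queues)
{
	return 4096 + ((size_t)(nr_io_queues + 1) << (db_stride + 3));
}

/* Pretend mapping: fails for anything larger than 8192 bytes. */
static void *try_map(size_t size)
{
	return size > 8192 ? NULL : malloc(size);
}

/* Shrink *nr_io_queues until the doorbell region maps, as the patch does. */
static void *map_doorbells(unsigned db_stride, unsigned *nr_io_queues)
{
	size_t size = db_bar_size(db_stride, *nr_io_queues);
	void *bar;

	do {
		bar = try_map(size);
		if (bar)
			return bar;
		if (!--*nr_io_queues)
			return NULL;	/* the driver returns -ENOMEM here */
		size = db_bar_size(db_stride, *nr_io_queues);
	} while (1);
}

int main(void)
{
	unsigned nr_io_queues = 64;
	void *bar = map_doorbells(4, &nr_io_queues);

	if (!bar)
		return 1;
	printf("mapped with %u I/O queues\n", nr_io_queues);
	free(bar);
	return 0;
}

Note also the reordering visible in the diff: free_irq() on the admin queue's vector now runs after the remap block, so the early -ENOMEM return cannot leave the interrupt freed twice, and q_suspended is set when queue_request_irq() fails so teardown knows the interrupt is not registered.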