提交 cd50f9b2 编写于 作者: C Christoph Hellwig 提交者: Jens Axboe

nvme: split nvme_kill_queues

nvme_kill_queues does two things:

 1) mark the gendisk of all namespaces dead
 2) unquiesce all I/O queues

These used to be intertwined due to block layer issues, but aren't
any more.  So move the unquiescing of the I/O queues into the callers,
and rename the rest of the function to the now more descriptive
nvme_mark_namespaces_dead.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20221101150050.3510-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
上级 6bcd5089
...@@ -1153,7 +1153,8 @@ static void apple_nvme_reset_work(struct work_struct *work) ...@@ -1153,7 +1153,8 @@ static void apple_nvme_reset_work(struct work_struct *work)
nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING); nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
nvme_get_ctrl(&anv->ctrl); nvme_get_ctrl(&anv->ctrl);
apple_nvme_disable(anv, false); apple_nvme_disable(anv, false);
nvme_kill_queues(&anv->ctrl); nvme_mark_namespaces_dead(&anv->ctrl);
nvme_start_queues(&anv->ctrl);
if (!queue_work(nvme_wq, &anv->remove_work)) if (!queue_work(nvme_wq, &anv->remove_work))
nvme_put_ctrl(&anv->ctrl); nvme_put_ctrl(&anv->ctrl);
} }
......
...@@ -4561,8 +4561,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) ...@@ -4561,8 +4561,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
* removing the namespaces' disks; fail all the queues now to avoid * removing the namespaces' disks; fail all the queues now to avoid
* potentially having to clean up the failed sync later. * potentially having to clean up the failed sync later.
*/ */
if (ctrl->state == NVME_CTRL_DEAD) if (ctrl->state == NVME_CTRL_DEAD) {
nvme_kill_queues(ctrl); nvme_mark_namespaces_dead(ctrl);
nvme_start_queues(ctrl);
}
/* this is a no-op when called from the controller reset handler */ /* this is a no-op when called from the controller reset handler */
nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
...@@ -5108,39 +5110,17 @@ static void nvme_stop_ns_queue(struct nvme_ns *ns) ...@@ -5108,39 +5110,17 @@ static void nvme_stop_ns_queue(struct nvme_ns *ns)
blk_mq_wait_quiesce_done(ns->queue); blk_mq_wait_quiesce_done(ns->queue);
} }
/* /* let I/O to all namespaces fail in preparation for surprise removal */
* Prepare a queue for teardown. void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
*
* This must forcibly unquiesce queues to avoid blocking dispatch.
*/
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
blk_mark_disk_dead(ns->disk);
nvme_start_ns_queue(ns);
}
/**
* nvme_kill_queues(): Ends all namespace queues
* @ctrl: the dead controller that needs to end
*
* Call this function when the driver determines it is unable to get the
* controller in a state capable of servicing IO.
*/
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{ {
struct nvme_ns *ns; struct nvme_ns *ns;
down_read(&ctrl->namespaces_rwsem); down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) list_for_each_entry(ns, &ctrl->namespaces, list)
nvme_set_queue_dying(ns); blk_mark_disk_dead(ns->disk);
up_read(&ctrl->namespaces_rwsem); up_read(&ctrl->namespaces_rwsem);
} }
EXPORT_SYMBOL_GPL(nvme_kill_queues); EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
void nvme_unfreeze(struct nvme_ctrl *ctrl) void nvme_unfreeze(struct nvme_ctrl *ctrl)
{ {
......
...@@ -483,7 +483,6 @@ struct nvme_ns { ...@@ -483,7 +483,6 @@ struct nvme_ns {
unsigned long features; unsigned long features;
unsigned long flags; unsigned long flags;
#define NVME_NS_REMOVING 0 #define NVME_NS_REMOVING 0
#define NVME_NS_DEAD 1
#define NVME_NS_ANA_PENDING 2 #define NVME_NS_ANA_PENDING 2
#define NVME_NS_FORCE_RO 3 #define NVME_NS_FORCE_RO 3
#define NVME_NS_READY 4 #define NVME_NS_READY 4
...@@ -758,7 +757,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl); ...@@ -758,7 +757,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl); void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl); void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
void nvme_start_admin_queue(struct nvme_ctrl *ctrl); void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl); void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl); void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl); void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl); void nvme_unfreeze(struct nvme_ctrl *ctrl);
......
...@@ -2788,7 +2788,8 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev) ...@@ -2788,7 +2788,8 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
nvme_get_ctrl(&dev->ctrl); nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false); nvme_dev_disable(dev, false);
nvme_kill_queues(&dev->ctrl); nvme_mark_namespaces_dead(&dev->ctrl);
nvme_start_queues(&dev->ctrl);
if (!queue_work(nvme_wq, &dev->remove_work)) if (!queue_work(nvme_wq, &dev->remove_work))
nvme_put_ctrl(&dev->ctrl); nvme_put_ctrl(&dev->ctrl);
} }
...@@ -2913,7 +2914,8 @@ static void nvme_reset_work(struct work_struct *work) ...@@ -2913,7 +2914,8 @@ static void nvme_reset_work(struct work_struct *work)
nvme_unfreeze(&dev->ctrl); nvme_unfreeze(&dev->ctrl);
} else { } else {
dev_warn(dev->ctrl.device, "IO queues lost\n"); dev_warn(dev->ctrl.device, "IO queues lost\n");
nvme_kill_queues(&dev->ctrl); nvme_mark_namespaces_dead(&dev->ctrl);
nvme_start_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl); nvme_remove_namespaces(&dev->ctrl);
nvme_free_tagset(dev); nvme_free_tagset(dev);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册