Commit aed3ea94 authored by Jens Axboe

block: wake up waiters when a queue is marked dying

If it's dying, we can't expect new requests to complete and come
in and wake up other tasks waiting for requests. So after we
have marked it as dying, wake up everybody currently waiting
for a request. Once they wake, they will retry their allocation
and fail appropriately due to the state of the queue.
Tested-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent 2b25d981
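For context, the legacy (non-blk-mq) allocation path sleeps on rl->wait[] and re-checks the queue state each time it is woken, which is why a plain wake-up is enough for waiters to fail out once QUEUE_FLAG_DYING is set. The sketch below is a simplified illustration of that wait-and-retry loop, not the literal upstream get_request() code; try_alloc_request() is a hypothetical stand-in for the real allocation attempt.

/*
 * Simplified sketch of the legacy allocator's wait-and-retry loop
 * (illustrative only; try_alloc_request() is a hypothetical helper,
 * not the literal upstream get_request() implementation).
 */
static struct request *get_request_sketch(struct request_queue *q,
                                          struct request_list *rl, int is_sync)
{
        DEFINE_WAIT(wait);
        struct request *rq;

retry:
        rq = try_alloc_request(rl);     /* hypothetical allocation attempt */
        if (rq)
                return rq;

        /* Once the queue is marked dying, fail instead of sleeping again. */
        if (unlikely(blk_queue_dying(q)))
                return ERR_PTR(-ENODEV);

        /*
         * Sleep until a request is freed, or until blk_set_queue_dying()
         * wakes this waitqueue so the dying check above runs again.
         */
        prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                  TASK_UNINTERRUPTIBLE);
        io_schedule();
        finish_wait(&rl->wait[is_sync], &wait);
        goto retry;
}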
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+        queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+        if (q->mq_ops)
+                blk_mq_wake_waiters(q);
+        else {
+                struct request_list *rl;
+
+                blk_queue_for_each_rl(rl, q) {
+                        if (rl->rq_pool) {
+                                wake_up(&rl->wait[BLK_RW_SYNC]);
+                                wake_up(&rl->wait[BLK_RW_ASYNC]);
+                        }
+                }
+        }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
         /* mark @q DYING, no new request or merges will be allowed afterwards */
         mutex_lock(&q->sysfs_lock);
-        queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+        blk_set_queue_dying(q);
         spin_lock_irq(lock);
 
         /*
...
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
         struct blk_mq_bitmap_tags *bt;
         int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
                 wake_index = bt_index_inc(wake_index);
         }
+
+        if (include_reserve) {
+                bt = &tags->breserved_tags;
+                if (waitqueue_active(&bt->bs[0].wait))
+                        wake_up(&bt->bs[0].wait);
+        }
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
         atomic_dec(&tags->active_queues);
 
-        blk_mq_tag_wakeup_all(tags);
+        blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
          * static and should never need resizing.
          */
         bt_update_count(&tags->bitmap_tags, tdepth);
-        blk_mq_tag_wakeup_all(tags);
+        blk_mq_tag_wakeup_all(tags, false);
 
         return 0;
 }
...
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
         BLK_MQ_TAG_CACHE_MIN    = 1,
...
@@ -152,6 +152,16 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+        struct blk_mq_hw_ctx *hctx;
+        unsigned int i;
+
+        queue_for_each_hw_ctx(q, hctx, i)
+                if (blk_mq_hw_queue_mapped(hctx))
+                        blk_mq_tag_wakeup_all(hctx->tags, true);
+}
+
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
         return blk_mq_has_free_tags(hctx->tags);
...
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
                 struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
...
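Since blk_set_queue_dying() is exported, a driver that has detected dead hardware could in principle mark the queue dying ahead of the full teardown, so that tasks blocked waiting for a request or a tag are woken and fail their allocations immediately. Below is a hypothetical teardown sketch; example_remove_device() is not a real kernel function, only an illustration of how the new export fits before blk_cleanup_queue().

/* Hypothetical driver teardown path illustrating the new export. */
static void example_remove_device(struct request_queue *q)
{
        /*
         * Mark the queue dying first: blk_set_queue_dying() sets
         * QUEUE_FLAG_DYING and wakes every waiter (request_list waitqueues
         * on the legacy path, all tag waitqueues including reserved tags
         * on blk-mq), so blocked allocators retry and fail cleanly.
         */
        blk_set_queue_dying(q);

        /* ... quiesce or abort outstanding hardware commands here ... */

        blk_cleanup_queue(q);
}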