Commit 2929f1a3 authored by Yu Kuai, committed by Yang Yingliang

blk-mq: factor out some helpers to quiesce/unquiesce queue

hulk inclusion
category: bugfix
bugzilla: 173974
CVE: NA
---------------------------

Prepare to support quiescing the queue concurrently from drivers and the
block layer; no functional changes. (A usage sketch of the affected API
follows the commit metadata below.)
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 80086651
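For context, here is a minimal sketch of how a driver typically uses the quiesce API that this patch refactors. This is illustrative only: my_driver_reconfigure() is a hypothetical name, and error handling is omitted; the blk_mq_* calls themselves are the real exported interfaces touched by the diff.

#include <linux/blk-mq.h>

/* Quiesce dispatch, update state that ->queue_rq() reads, then resume. */
static void my_driver_reconfigure(struct request_queue *q)
{
	/* Sets QUEUE_FLAG_QUIESCED and waits for in-flight dispatches. */
	blk_mq_quiesce_queue(q);

	/* ... update driver state that dispatch must not observe mid-change ... */

	/* Clears the flag and reruns the hardware queues. */
	blk_mq_unquiesce_queue(q);
}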
block/blk-mq.c
@@ -211,32 +211,29 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
+static void __blk_mq_quiesce_queue_nowait(struct request_queue *q,
+					  unsigned int flag)
+{
+	blk_queue_flag_set(flag, q);
+}
+
 /*
  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
  * mpt3sas driver such that this function can be removed.
  */
 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
-	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+	__blk_mq_quiesce_queue_nowait(q, QUEUE_FLAG_QUIESCED);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
-/**
- * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
- * @q: request queue.
- *
- * Note: this function does not prevent that the struct request end_io()
- * callback function is invoked. Once this function is returned, we make
- * sure no dispatch can happen until the queue is unquiesced via
- * blk_mq_unquiesce_queue().
- */
-void blk_mq_quiesce_queue(struct request_queue *q)
+static void __blk_mq_quiesce_queue(struct request_queue *q, unsigned int flag)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 	bool rcu = false;
 
-	blk_mq_quiesce_queue_nowait(q);
+	__blk_mq_quiesce_queue_nowait(q, flag);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -247,15 +244,30 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	if (rcu)
 		synchronize_rcu();
 }
+
+/**
+ * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
+ * @q: request queue.
+ *
+ * Note: this function does not prevent that the struct request end_io()
+ * callback function is invoked. Once this function is returned, we make
+ * sure no dispatch can happen until the queue is unquiesced via
+ * blk_mq_unquiesce_queue().
+ */
+void blk_mq_quiesce_queue(struct request_queue *q)
+{
+	__blk_mq_quiesce_queue(q, QUEUE_FLAG_QUIESCED);
+}
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
-bool blk_mq_quiesce_queue_without_rcu(struct request_queue *q)
+static bool __blk_mq_quiesce_queue_without_rcu(struct request_queue *q,
+					       unsigned int flag)
 {
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 	bool rcu = false;
 
-	blk_mq_quiesce_queue_nowait(q);
+	__blk_mq_quiesce_queue_nowait(q, flag);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -265,8 +277,21 @@ bool blk_mq_quiesce_queue_without_rcu(struct request_queue *q)
 	}
 	return rcu;
 }
+
+bool blk_mq_quiesce_queue_without_rcu(struct request_queue *q)
+{
+	return __blk_mq_quiesce_queue_without_rcu(q, QUEUE_FLAG_QUIESCED);
+}
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_without_rcu);
 
+static void __blk_mq_unquiesce_queue(struct request_queue *q, unsigned int flag)
+{
+	blk_queue_flag_clear(flag, q);
+
+	/* dispatch requests which are inserted during quiescing */
+	blk_mq_run_hw_queues(q, true);
+}
+
 /*
  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
  * @q: request queue.
@@ -276,10 +301,7 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_without_rcu);
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
-	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
-
-	/* dispatch requests which are inserted during quiescing */
-	blk_mq_run_hw_queues(q, true);
+	__blk_mq_unquiesce_queue(q, QUEUE_FLAG_QUIESCED);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
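The payoff of passing the flag explicitly is that a later patch can give the block layer its own quiesce flag, so a driver calling blk_mq_unquiesce_queue() cannot accidentally end a block-layer-initiated quiesce. A simplified sketch of what such a follow-up might look like; QUEUE_FLAG_QUIESCED_INTERNAL and both function names are assumed for illustration and are not part of this patch (a real implementation would also need to keep the queue quiesced while either flag remains set):

/* Hypothetical block-layer-internal variants reusing the new helpers. */
void blk_mq_quiesce_queue_internal(struct request_queue *q)
{
	/* Assumed flag name: a second quiesce flag owned by the block layer. */
	__blk_mq_quiesce_queue(q, QUEUE_FLAG_QUIESCED_INTERNAL);
}

void blk_mq_unquiesce_queue_internal(struct request_queue *q)
{
	__blk_mq_unquiesce_queue(q, QUEUE_FLAG_QUIESCED_INTERNAL);
}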