提交 d6784b9f 编写于 作者: Y Yu Kuai 提交者: Yang Yingliang

blk-mq: add a new queue flag to quiesce/unquiesce queue

hulk inclusion
category: bugfix
bugzilla: 173974
CVE: NA
---------------------------

The queue will be quiesced if either the old or the new flag is set, and the
queue will be unquiesced only when both flags are cleared.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 2929f1a3
...@@ -195,7 +195,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) ...@@ -195,7 +195,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
LIST_HEAD(rq_list); LIST_HEAD(rq_list);
/* RCU or SRCU read lock is needed before checking quiesced flag */ /* RCU or SRCU read lock is needed before checking quiesced flag */
if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q) ||
blk_queue_quiesced_internal(q)))
return; return;
hctx->run++; hctx->run++;
......
...@@ -260,6 +260,11 @@ void blk_mq_quiesce_queue(struct request_queue *q) ...@@ -260,6 +260,11 @@ void blk_mq_quiesce_queue(struct request_queue *q)
} }
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
/*
 * blk_mq_quiesce_queue_internal - quiesce the queue on behalf of the block layer
 * @q: request queue to quiesce
 *
 * Sets QUEUE_FLAG_QUIESCED_INTERNAL (block-layer-owned flag) via the common
 * quiesce helper, so block-internal quiesce does not clash with a driver's
 * use of QUEUE_FLAG_QUIESCED.  Dispatch paths treat the queue as quiesced
 * while either flag is set.
 */
void blk_mq_quiesce_queue_internal(struct request_queue *q)
{
	__blk_mq_quiesce_queue(q, QUEUE_FLAG_QUIESCED_INTERNAL);
}
static bool __blk_mq_quiesce_queue_without_rcu(struct request_queue *q, static bool __blk_mq_quiesce_queue_without_rcu(struct request_queue *q,
unsigned int flag) unsigned int flag)
{ {
...@@ -305,6 +310,11 @@ void blk_mq_unquiesce_queue(struct request_queue *q) ...@@ -305,6 +310,11 @@ void blk_mq_unquiesce_queue(struct request_queue *q)
} }
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
/*
 * blk_mq_unquiesce_queue_internal - undo a block-layer-internal quiesce
 * @q: request queue to unquiesce
 *
 * Clears QUEUE_FLAG_QUIESCED_INTERNAL via the common unquiesce helper.
 * NOTE(review): the queue only resumes dispatch once the driver-owned
 * QUEUE_FLAG_QUIESCED is also clear — confirm against __blk_mq_unquiesce_queue.
 */
void blk_mq_unquiesce_queue_internal(struct request_queue *q)
{
	__blk_mq_unquiesce_queue(q, QUEUE_FLAG_QUIESCED_INTERNAL);
}
void blk_mq_wake_waiters(struct request_queue *q) void blk_mq_wake_waiters(struct request_queue *q)
{ {
struct blk_mq_hw_ctx *hctx; struct blk_mq_hw_ctx *hctx;
...@@ -1491,6 +1501,7 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) ...@@ -1491,6 +1501,7 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
*/ */
hctx_lock(hctx, &srcu_idx); hctx_lock(hctx, &srcu_idx);
need_run = !blk_queue_quiesced(hctx->queue) && need_run = !blk_queue_quiesced(hctx->queue) &&
!blk_queue_quiesced_internal(hctx->queue) &&
blk_mq_hctx_has_pending(hctx); blk_mq_hctx_has_pending(hctx);
hctx_unlock(hctx, srcu_idx); hctx_unlock(hctx, srcu_idx);
...@@ -1844,7 +1855,8 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, ...@@ -1844,7 +1855,8 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
* blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
* and avoid driver to try to dispatch again. * and avoid driver to try to dispatch again.
*/ */
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q) ||
blk_queue_quiesced_internal(q)) {
run_queue = false; run_queue = false;
bypass_insert = false; bypass_insert = false;
goto insert; goto insert;
......
...@@ -46,6 +46,8 @@ bool blk_mq_get_driver_tag(struct request *rq); ...@@ -46,6 +46,8 @@ bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start); struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq); void blk_mq_put_rq_ref(struct request *rq);
void blk_mq_quiesce_queue_internal(struct request_queue *q);
void blk_mq_unquiesce_queue_internal(struct request_queue *q);
/* /*
* Internal helpers for allocating/freeing the request map * Internal helpers for allocating/freeing the request map
......
...@@ -725,8 +725,11 @@ struct request_queue { ...@@ -725,8 +725,11 @@ struct request_queue {
#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */ #define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ #define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ #define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ /* queue has been quiesced, used in driver */
#define QUEUE_FLAG_QUIESCED 28
#define QUEUE_FLAG_FORECE_QUIESCE 29 /* force quiesce when cleanup queue */ #define QUEUE_FLAG_FORECE_QUIESCE 29 /* force quiesce when cleanup queue */
/* queue has been quiesced, used in block layer */
#define QUEUE_FLAG_QUIESCED_INTERNAL 30
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_SAME_COMP) | \
...@@ -763,7 +766,9 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); ...@@ -763,7 +766,9 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_noretry_request(rq) \ #define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER)) REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_quiesced_internal(q) \
test_bit(QUEUE_FLAG_QUIESCED_INTERNAL, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) #define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册