Commit 5a61c363 authored by Tejun Heo, committed by Jens Axboe

blk-mq: remove REQ_ATOM_STARTED

After the recent updates to use generation number and state based
synchronization, we can easily replace REQ_ATOM_STARTED usages by
adding an extra state to distinguish completed but not yet freed
state.

Add MQ_RQ_COMPLETE and replace REQ_ATOM_STARTED usages with
blk_mq_rq_state() tests.  REQ_ATOM_STARTED no longer has any users
left and is removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 634f9e46
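For orientation before the diff: the replacements below lean on the per-request generation/state word (rq->gstate) introduced earlier in this series. The snippet that follows is an illustrative sketch only, paraphrasing that read helper and the old-to-new predicate mapping; it is not part of this commit's diff, and the exact helper body is an assumption based on the surrounding patch series.

/* Sketch (not part of this diff, helper body assumed from the series):
 * the low MQ_RQ_STATE_BITS of rq->gstate hold the mq_rq_state, so state
 * comparisons replace the old REQ_ATOM_STARTED bit tests.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/* Old REQ_ATOM_STARTED test            ->  state-based replacement               */
/* test_bit(REQ_ATOM_STARTED, ...)      ->  blk_mq_rq_state(rq) != MQ_RQ_IDLE     */
/* started and already completed        ->  blk_mq_rq_state(rq) == MQ_RQ_COMPLETE */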
@@ -271,7 +271,6 @@ static const char *const cmd_flag_name[] = {
 #define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
 static const char *const rqf_name[] = {
         RQF_NAME(SORTED),
-        RQF_NAME(STARTED),
         RQF_NAME(QUEUED),
         RQF_NAME(SOFTBARRIER),
         RQF_NAME(FLUSH_SEQ),
@@ -295,7 +294,6 @@ static const char *const rqf_name[] = {
 #define RQAF_NAME(name) [REQ_ATOM_##name] = #name
 static const char *const rqaf_name[] = {
         RQAF_NAME(COMPLETE),
-        RQAF_NAME(STARTED),
         RQAF_NAME(POLL_SLEPT),
 };
 #undef RQAF_NAME
@@ -409,7 +407,7 @@ static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
         const struct show_busy_params *params = data;
 
         if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
-            test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+            blk_mq_rq_state(rq) != MQ_RQ_IDLE)
                 __blk_mq_debugfs_rq_show(params->m,
                                          list_entry_rq(&rq->queuelist));
 }
......
@@ -483,7 +483,6 @@ void blk_mq_free_request(struct request *rq)
         blk_put_rl(blk_rq_rl(rq));
 
         blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
-        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
         clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
         if (rq->tag != -1)
                 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
@@ -531,6 +530,7 @@ static void __blk_mq_complete_request(struct request *rq)
         int cpu;
 
         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
+        blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);
 
         if (rq->internal_tag != -1)
                 blk_mq_sched_completed_request(rq);
@@ -642,7 +642,7 @@ EXPORT_SYMBOL(blk_mq_complete_request);
 
 int blk_mq_request_started(struct request *rq)
 {
-        return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
 }
 EXPORT_SYMBOL_GPL(blk_mq_request_started);
 
@@ -661,7 +661,6 @@ void blk_mq_start_request(struct request *rq)
         }
 
         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
-        WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));
 
         /*
          * Mark @rq in-flight which also advances the generation number,
@@ -683,8 +682,6 @@ void blk_mq_start_request(struct request *rq)
         write_seqcount_end(&rq->gstate_seq);
         preempt_enable();
 
-        set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-
         if (q->dma_drain_size && blk_rq_bytes(rq)) {
                 /*
                  * Make sure space for the drain appears. We know we can do
@@ -697,13 +694,9 @@ void blk_mq_start_request(struct request *rq)
 EXPORT_SYMBOL(blk_mq_start_request);
 
 /*
- * When we reach here because queue is busy, REQ_ATOM_COMPLETE
- * flag isn't set yet, so there may be race with timeout handler,
- * but given rq->deadline is just set in .queue_rq() under
- * this situation, the race won't be possible in reality because
- * rq->timeout should be set as big enough to cover the window
- * between blk_mq_start_request() called from .queue_rq() and
- * clearing REQ_ATOM_STARTED here.
+ * When we reach here because queue is busy, it's safe to change the state
+ * to IDLE without checking @rq->aborted_gstate because we should still be
+ * holding the RCU read lock and thus protected against timeout.
  */
 static void __blk_mq_requeue_request(struct request *rq)
 {
@@ -715,7 +708,7 @@ static void __blk_mq_requeue_request(struct request *rq)
         wbt_requeue(q->rq_wb, &rq->issue_stat);
         blk_mq_sched_requeue_request(rq);
 
-        if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+        if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
                 blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
                 if (q->dma_drain_size && blk_rq_bytes(rq))
                         rq->nr_phys_segments--;
@@ -822,18 +815,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
         const struct blk_mq_ops *ops = req->q->mq_ops;
         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
 
-        /*
-         * We know that complete is set at this point. If STARTED isn't set
-         * anymore, then the request isn't active and the "timeout" should
-         * just be ignored. This can happen due to the bitflag ordering.
-         * Timeout first checks if STARTED is set, and if it is, assumes
-         * the request is active. But if we race with completion, then
-         * both flags will get cleared. So check here again, and ignore
-         * a timeout event with a request that isn't active.
-         */
-        if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
-                return;
-
         req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
 
         if (ops->timeout)
@@ -869,8 +850,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 
         might_sleep();
 
-        if ((rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) ||
-            !test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+        if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED)
                 return;
 
         /* read coherent snapshots of @rq->state_gen and @rq->deadline */
@@ -3022,8 +3002,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 
         hrtimer_init_sleeper(&hs, current);
         do {
-                if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
-                    blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
+                if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
                         break;
                 set_current_state(TASK_UNINTERRUPTIBLE);
                 hrtimer_start_expires(&hs.timer, mode);
......
@@ -34,6 +34,7 @@ struct blk_mq_ctx {
 enum mq_rq_state {
         MQ_RQ_IDLE              = 0,
         MQ_RQ_IN_FLIGHT         = 1,
+        MQ_RQ_COMPLETE          = 2,
 
         MQ_RQ_STATE_BITS        = 2,
         MQ_RQ_STATE_MASK        = (1 << MQ_RQ_STATE_BITS) - 1,
......
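A brief aside on the enum above (illustration, not part of the diff): the new MQ_RQ_COMPLETE value still fits in the existing two state bits, so MQ_RQ_STATE_BITS and MQ_RQ_STATE_MASK are unchanged, and the generation counter, which the in-diff comment says advances when a request goes in-flight, keeps living in the bits above the state. The sketch below assumes that packed layout; the function name is hypothetical and only shows the bit manipulation.

/* Sketch only, assuming gstate packs (generation << MQ_RQ_STATE_BITS) | state:
 * marking a request complete rewrites just the low MQ_RQ_STATE_BITS and
 * leaves the generation bits untouched, e.g. 0x15 (gen 5, IN_FLIGHT)
 * becomes 0x16 (gen 5, COMPLETE).
 */
static inline void blk_mq_rq_set_complete_sketch(struct request *rq)
{
        u64 old_val = READ_ONCE(rq->gstate);
        u64 new_val = (old_val & ~(u64)MQ_RQ_STATE_MASK) | MQ_RQ_COMPLETE;

        WRITE_ONCE(rq->gstate, new_val);
}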
@@ -124,7 +124,6 @@ void blk_account_io_done(struct request *req);
  */
 enum rq_atomic_flags {
         REQ_ATOM_COMPLETE = 0,
-        REQ_ATOM_STARTED,
         REQ_ATOM_POLL_SLEPT,
 };
 
......