Commit 76a86f9d authored by Jens Axboe

block: remove REQ_ATOM_POLL_SLEPT

We don't need this to be an atomic flag; it can be a regular
flag. We either end up on the same CPU for the polling, in which
case the state is sane, or we did the sleep which would imply
the needed barrier to ensure we see the right state.
Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 5d75d3f2
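The trade-off the commit message describes is easier to see in code. The sketch below is not the kernel source: struct fake_request, the FAKE_* constants and the two helper functions are simplified, invented stand-ins (C11 atomics standing in for test_bit()/set_bit()) that contrast the atomic bit-op pattern being removed with the plain-flag pattern that replaces it.

/*
 * Minimal userspace sketch, not kernel code: fake_request stands in for
 * struct request, and C11 atomics stand in for test_bit()/set_bit().
 */
#include <stdatomic.h>
#include <stdbool.h>

struct fake_request {
        atomic_ulong atomic_flags;      /* old style: bits touched with atomic ops */
        unsigned int rq_flags;          /* new style: plain flag word (rq_flags_t) */
};

#define FAKE_ATOM_POLL_SLEPT    (1UL << 1)      /* stand-in for REQ_ATOM_POLL_SLEPT */
#define FAKE_RQF_MQ_POLL_SLEPT  (1U << 21)      /* stand-in for RQF_MQ_POLL_SLEPT */

/* Old pattern: every hybrid-poll sleep pays for an atomic read-modify-write. */
static bool poll_slept_atomic(struct fake_request *rq)
{
        if (atomic_load(&rq->atomic_flags) & FAKE_ATOM_POLL_SLEPT)
                return true;            /* already slept once for this request */
        atomic_fetch_or(&rq->atomic_flags, FAKE_ATOM_POLL_SLEPT);
        return false;
}

/*
 * New pattern: plain load and store.  This is safe for the reason the
 * commit message gives: the flag is only read and written by the task
 * doing the polling, and if that task ends up on another CPU it got
 * there by sleeping, and the sleep/wake-up already implies the memory
 * barrier needed to observe the earlier store.
 */
static bool poll_slept_plain(struct fake_request *rq)
{
        if (rq->rq_flags & FAKE_RQF_MQ_POLL_SLEPT)
                return true;
        rq->rq_flags |= FAKE_RQF_MQ_POLL_SLEPT;
        return false;
}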
@@ -290,13 +290,13 @@ static const char *const rqf_name[] = {
 	RQF_NAME(SPECIAL_PAYLOAD),
 	RQF_NAME(ZONE_WRITE_LOCKED),
 	RQF_NAME(MQ_TIMEOUT_EXPIRED),
+	RQF_NAME(MQ_POLL_SLEPT),
 };
 #undef RQF_NAME
 #define RQAF_NAME(name) [REQ_ATOM_##name] = #name
 static const char *const rqaf_name[] = {
 	RQAF_NAME(COMPLETE),
-	RQAF_NAME(POLL_SLEPT),
 };
 #undef RQAF_NAME
......
@@ -483,7 +483,6 @@ void blk_mq_free_request(struct request *rq)
 	blk_put_rl(blk_rq_rl(rq));
 	blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
-	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
 	if (rq->tag != -1)
 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
@@ -2976,7 +2975,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	unsigned int nsecs;
 	ktime_t kt;
-	if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
+	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
 		return false;
 	/*
@@ -2996,7 +2995,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	if (!nsecs)
 		return false;
-	set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
+	rq->rq_flags |= RQF_MQ_POLL_SLEPT;
 	/*
 	 * This will be replaced with the stats tracking code, using
......
@@ -124,8 +124,6 @@ void blk_account_io_done(struct request *req);
  */
 enum rq_atomic_flags {
 	REQ_ATOM_COMPLETE = 0,
-	REQ_ATOM_POLL_SLEPT,
 };
 /*
......
@@ -127,6 +127,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
 /* timeout is expired */
 #define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
+/* already slept for hybrid poll */
+#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \
......