Commit 9bdcaff2 authored by Hou Tao, committed by Caspar Zhang

block: make rq sector size accessible for block stats

to #29361128

commit 3d24430694077313c75c6b89f618db09943621e4 upstream.

Currently rq->data_len will be decreased by partial completion or
zeroed by completion, so when blk_stat_add() is invoked, data_len
will be zero and there will never be samples in poll_cb because
blk_mq_poll_stats_bkt() will return -1 if data_len is zero.

We could move blk_stat_add() back to __blk_mq_complete_request(),
but that would make the effort of trying to call ktime_get_ns()
once in vain. Instead we can reuse throtl_size field, and use
it for both block stats and block throttle, and adjust the
logic in blk_mq_poll_stats_bkt() accordingly.

Fixes: 4bc6339a ("block: move blk_stat_add() to __blk_mq_end_request()")
Tested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jiufei Xue <jiufei.xue@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent fc94dc72
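Before the diff, a minimal stand-alone sketch of the bucket math described in the message above (not part of the commit): bucket_from_bytes(), bucket_from_sectors() and the local ilog2() are illustrative stand-ins for the kernel code, the 4 KiB read in main() is a made-up example, and the kernel's upper-bound clamp on the bucket index is elided.

/* Illustration of blk_mq_poll_stats_bkt()'s old vs. new bucket math. */
#include <stdio.h>

/* Local stand-in for the kernel's ilog2(): floor(log2(v)), -1 for 0. */
static int ilog2(unsigned int v)
{
        int log = -1;

        while (v) {
                v >>= 1;
                log++;
        }
        return log;
}

/* Old formula: derived from rq->__data_len, which completion zeroes. */
static int bucket_from_bytes(int ddir, unsigned int bytes)
{
        int bucket = ddir + 2 * (ilog2(bytes) - 9);

        return bucket < 0 ? -1 : bucket;        /* upper clamp elided */
}

/* New formula: derived from stats_sectors, which survives completion. */
static int bucket_from_sectors(int ddir, unsigned int sectors)
{
        int bucket = ddir + 2 * ilog2(sectors);

        return bucket < 0 ? -1 : bucket;        /* upper clamp elided */
}

int main(void)
{
        /* A completed 4 KiB read: data_len is already 0, stats_sectors is 8. */
        printf("old: %d\n", bucket_from_bytes(0, 0));   /* -1: sample dropped  */
        printf("new: %d\n", bucket_from_sectors(0, 8)); /*  6: sample recorded */
        return 0;
}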
@@ -3011,9 +3011,7 @@ void blk_start_request(struct request *req)
 	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
 		req->io_start_time_ns = ktime_get_ns();
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-		req->throtl_size = blk_rq_sectors(req);
-#endif
+		req->stats_sectors = blk_rq_sectors(req);
 		req->rq_flags |= RQF_STATS;
 		rq_qos_issue(req->q, req);
 	}
@@ -42,12 +42,12 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
-	int ddir, bytes, bucket;
+	int ddir, sectors, bucket;
 
 	ddir = rq_data_dir(rq);
-	bytes = blk_rq_bytes(rq);
+	sectors = blk_rq_stats_sectors(rq);
 
-	bucket = ddir + 2*(ilog2(bytes) - 9);
+	bucket = ddir + 2 * ilog2(sectors);
 
 	if (bucket < 0)
 		return -1;
@@ -342,6 +342,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 #endif
 	rq->start_time_ns = ktime_get_ns();
 	rq->io_start_time_ns = 0;
+	rq->stats_sectors = 0;
 	rq->nr_phys_segments = 0;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
@@ -674,9 +675,7 @@ void blk_mq_start_request(struct request *rq)
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		rq->io_start_time_ns = ktime_get_ns();
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-		rq->throtl_size = blk_rq_sectors(rq);
-#endif
+		rq->stats_sectors = blk_rq_sectors(rq);
 		rq->rq_flags |= RQF_STATS;
 		rq_qos_issue(q, rq);
 	}
@@ -2472,7 +2472,8 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
 	struct request_queue *q = rq->q;
 	struct throtl_data *td = q->td;
 
-	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
+	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
+			     time_ns >> 10);
 }
 
 void blk_throtl_bio_endio(struct bio *bio)
@@ -224,9 +224,12 @@ struct request {
 #ifdef CONFIG_BLK_WBT
 	unsigned short wbt_flags;
 #endif
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	unsigned short throtl_size;
-#endif
+	/*
+	 * rq sectors used for blk stats. It has the same value
+	 * as blk_rq_sectors(rq), except that it is never zeroed
+	 * by completion.
+	 */
+	unsigned short stats_sectors;
 
 	/*
 	 * Number of scatter-gather DMA addr+len pairs after
@@ -1064,6 +1067,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
  * blk_rq_err_bytes()		: bytes left till the next error boundary
  * blk_rq_sectors()		: sectors left in the entire request
  * blk_rq_cur_sectors()		: sectors left in the current segment
+ * blk_rq_stats_sectors()	: sectors of the entire request used for stats
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -1092,6 +1096,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
 }
 
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+	return rq->stats_sectors;
+}
+
 #ifdef CONFIG_BLK_DEV_ZONED
 static inline unsigned int blk_rq_zone_no(struct request *rq)
 {