diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3836af6d45a20612b44325a5d808e89c14876ff1..d3527af8803ab1501389cf05b6d4d364c139ee10 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1088,6 +1088,10 @@ static void throtl_bio_end_io(struct bio *bio)
 	struct throtl_grp *tg;
 
 	rcu_read_lock();
+	/* see comments in throtl_bio_stats_start() */
+	if (!bio_flagged(bio, BIO_THROTL_STATED))
+		goto out;
+
 	tg = (struct throtl_grp *)bio->bi_tg_private;
 	if (!tg)
 		goto out;
@@ -1096,6 +1100,7 @@ static void throtl_bio_end_io(struct bio *bio)
 			bio_io_start_time_ns(bio),
 			bio_op(bio));
 	blkg_put(tg_to_blkg(tg));
+	bio_clear_flag(bio, BIO_THROTL_STATED);
 out:
 	rcu_read_unlock();
 }
@@ -1104,11 +1109,19 @@ static inline void throtl_bio_stats_start(struct bio *bio, struct throtl_grp *tg
 {
 	int op = bio_op(bio);
 
-	if (op == REQ_OP_READ || op == REQ_OP_WRITE) {
+	/*
+	 * end_io may be invoked twice for the same bio, e.g. by dm-thin,
+	 * which saves the original end_io, calls its own overriding
+	 * end_io and then the saved one. Use the BIO_THROTL_STATED flag
+	 * so the bio is accounted only once.
+	 */
+	if ((op == REQ_OP_READ || op == REQ_OP_WRITE) &&
+	    !bio_flagged(bio, BIO_THROTL_STATED)) {
+		blkg_get(tg_to_blkg(tg));
+		bio_set_flag(bio, BIO_THROTL_STATED);
 		bio->bi_tg_end_io = throtl_bio_end_io;
 		bio->bi_tg_private = tg;
 		bio_set_start_time_ns(bio);
-		blkg_get(tg_to_blkg(tg));
 	}
 }
 
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index dedee389966c5adbb80a0ead92da01371e4bcf38..4badbaae2a9cdbb8e3728c207d1f22af127e4940 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -238,6 +238,7 @@ struct bio {
 #define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
 				 * of this bio. */
 #define BIO_QUEUE_ENTERED 11	/* can use blk_queue_enter_live() */
+#define BIO_THROTL_STATED 12	/* bio throttle statistics already accounted */
 /* See BVEC_POOL_OFFSET below before adding new flags */
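
For context, the failure mode this patch guards against is a completion hook that fires more than once for the same bio. The sketch below is a minimal userspace illustration of that pattern and of the flag-based idempotence the patch adds; it is not kernel code. The names fake_bio, account_start, account_end and stacked_completion are invented for the example and only stand in for struct bio, throtl_bio_stats_start(), throtl_bio_end_io() and the dm-thin style end_io chaining described in the patch comment.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct bio, reduced to the fields the example needs. */
struct fake_bio {
	bool throtl_stated;			/* plays the role of BIO_THROTL_STATED */
	void (*tg_end_io)(struct fake_bio *);	/* plays the role of bio->bi_tg_end_io */
};

/* Mirrors throtl_bio_stats_start(): arm the hook and mark the bio once. */
static void account_start(struct fake_bio *bio, void (*end_io)(struct fake_bio *))
{
	if (!bio->throtl_stated) {
		bio->throtl_stated = true;	/* the "blkg_get + set flag" step */
		bio->tg_end_io = end_io;
	}
}

/* Mirrors throtl_bio_end_io(): account only while the flag is still set. */
static void account_end(struct fake_bio *bio)
{
	if (!bio->throtl_stated)
		return;				/* duplicate completion, skip it */
	printf("accounted exactly once\n");
	bio->throtl_stated = false;		/* the "blkg_put + clear flag" step */
}

/*
 * Mirrors the dm-thin behaviour described in the patch comment: the stacked
 * driver ends up running the completion hook twice for the same bio.
 */
static void stacked_completion(struct fake_bio *bio)
{
	bio->tg_end_io(bio);	/* its own overriding end_io */
	bio->tg_end_io(bio);	/* then the saved original end_io */
}

int main(void)
{
	struct fake_bio bio = { 0 };

	account_start(&bio, account_end);
	stacked_completion(&bio);
	return 0;
}

Running the sketch prints the message once even though the hook runs twice, which is the behaviour BIO_THROTL_STATED is meant to guarantee for the throttle statistics and the blkg reference count.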