Commit 537821c0 authored by Tejun Heo, committed by Zheng Zengkai

block: don't merge across cgroup boundaries if blkcg is enabled

mainline inclusion
from mainline-v5.18-rc1
commit 6b2b0459
category: bugfix
bugzilla: 187443, https://gitee.com/openeuler/kernel/issues/I5Z7O2
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/fs?h=v6.0-rc5&id=6b2b04590b51aa4cf395fcd185ce439cab5961dc

---------------------------

blk-iocost and iolatency are cgroup aware rq-qos policies but they didn't
disable merges across different cgroups. This obviously can lead to
accounting and control errors but more importantly to priority inversions -
e.g. an IO which belongs to a higher priority cgroup or IO class may end up
getting throttled incorrectly because it gets merged to an IO issued from a
low priority cgroup.

Fix it by adding blk_cgroup_mergeable() which is called from merge paths and
rejects cross-cgroup and cross-issue_as_root merges.
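
For illustration only (not part of the patch): a minimal stand-alone sketch of
the two conditions the helper checks before allowing a merge. The stub types
and names below are simplified stand-ins, not the real kernel structures:

  #include <stdbool.h>

  struct blkcg_gq;                      /* per-cgroup, per-device group (opaque here) */

  struct bio_stub {
          struct blkcg_gq *bi_blkg;     /* cgroup the bio is charged to */
          bool issue_as_root;           /* e.g. metadata issued on behalf of root */
  };

  struct request_stub {
          struct bio_stub *bio;         /* first bio already in the request */
  };

  /* Allow a merge only if both the cgroup and the issue_as_root state match. */
  static bool cgroup_mergeable(const struct request_stub *rq,
                               const struct bio_stub *bio)
  {
          return rq->bio->bi_blkg == bio->bi_blkg &&
                 rq->bio->issue_as_root == bio->issue_as_root;
  }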
Signed-off-by: Tejun Heo <tj@kernel.org>
Fixes: d7067512 ("block: introduce blk-iolatency io controller")
Cc: stable@vger.kernel.org # v4.19+
Cc: Josef Bacik <jbacik@fb.com>
Link: https://lore.kernel.org/r/Yi/eE/6zFNyWJ+qd@slm.duckdns.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>

conflicts:
	block/blk-merge.c
	include/linux/blk-cgroup.h
Signed-off-by: Li Nan <linan122@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 3b87d266
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -7,6 +7,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
+#include <linux/blk-cgroup.h>
 
 #include <trace/events/block.h>
@@ -554,6 +555,9 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 		unsigned int nr_phys_segs)
 {
+	if (!blk_cgroup_mergeable(req, bio))
+		goto no_merge;
+
 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
 		goto no_merge;
@@ -650,6 +654,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (total_phys_segments > blk_rq_get_max_segments(req))
 		return 0;
 
+	if (!blk_cgroup_mergeable(req, next->bio))
+		return 0;
+
 	if (blk_integrity_merge_rq(q, req, next) == false)
 		return 0;
@@ -860,6 +867,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (rq->rq_disk != bio->bi_disk)
 		return false;
 
+	/* don't merge across cgroup boundaries */
+	if (!blk_cgroup_mergeable(rq, bio))
+		return false;
+
 	/* only merge integrity protected bio into ditto rq */
 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 		return false;
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -25,6 +25,7 @@
 #include <linux/atomic.h>
 #include <linux/kthread.h>
 #include <linux/fs.h>
+#include <linux/blk-mq.h>
 
 /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
 #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
@@ -610,6 +611,21 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
 		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
 }
 
+/**
+ * blk_cgroup_mergeable - Determine whether to allow or disallow merges
+ * @rq: request to merge into
+ * @bio: bio to merge
+ *
+ * @bio and @rq should belong to the same cgroup and their issue_as_root should
+ * match. The latter is necessary as we don't want to throttle e.g. a metadata
+ * update because it happens to be next to a regular IO.
+ */
+static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
+{
+	return rq->bio->bi_blkg == bio->bi_blkg &&
+		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
+}
+
 void blk_cgroup_bio_start(struct bio *bio);
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
@@ -665,6 +681,7 @@ static inline void blkg_put(struct blkcg_gq *blkg) { }
 static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
 static inline void blkcg_bio_issue_init(struct bio *bio) { }
 static inline void blk_cgroup_bio_start(struct bio *bio) { }
+static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
 
 #define blk_queue_for_each_rl(rl, q)	\
 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)