Unverified commit a397e702, authored by openeuler-ci-bot, committed by Gitee

!984 [sync] PR-946: iocost bugfix

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/946 
 
PR sync from:  Li Nan <linan122@huawei.com>
 https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/thread/EMFAV2GNNSOMDT4IALQKWD6C4CHZT436/ 
This patch series fixes iocost bugs; a minimal illustrative sketch of the approach follows the patch list below.

Li Nan (1):
  blk-iocost: fix UAF in ioc_pd_free

Yu Kuai (3):
  blk-iocost: track whether iocg is still online
  blk-iocost: don't throttle bio if iocg is offlined
  blk-iocost: dispatch all throttled bio in ioc_pd_offline
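
The common thread of the series: each iocg now tracks whether it is still online, the throttle and wake paths refuse to charge or queue work on an offlined group, and ioc_pd_offline() flushes every throttled waiter before ioc_pd_free() releases the group, which closes the use-after-free window. The sketch below is a minimal, standalone userspace model of that pattern; every name in it is hypothetical and is not blk-iocost code.

/*
 * Illustrative sketch only (plain C, hypothetical names): a group carries
 * an "online" flag, the throttle path refuses to queue a waiter on an
 * offline group, and the offline path dispatches everything still queued
 * before the group is freed, so no waiter can touch freed memory.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct waiter {
	int id;
	struct waiter *next;
};

struct group {
	bool online;
	struct waiter *waiters;	/* singly linked list of queued waiters */
};

/* Queue a waiter only while the group is online. */
static bool group_throttle(struct group *g, int id)
{
	struct waiter *w;

	if (!g->online)
		return false;	/* offline: do not throttle this caller */

	w = malloc(sizeof(*w));
	if (!w)
		return false;
	w->id = id;
	w->next = g->waiters;
	g->waiters = w;
	return true;
}

/* Mark the group offline and dispatch everything still queued on it. */
static void group_offline(struct group *g)
{
	g->online = false;
	while (g->waiters) {
		struct waiter *w = g->waiters;

		g->waiters = w->next;
		printf("dispatching waiter %d\n", w->id);
		free(w);
	}
}

int main(void)
{
	struct group g = { .online = true, .waiters = NULL };

	group_throttle(&g, 1);
	group_throttle(&g, 2);
	group_offline(&g);	/* flush all waiters before the group is freed */
	printf("throttle after offline: %s\n",
	       group_throttle(&g, 3) ? "queued" : "refused");
	return 0;
}

In the actual patches the flush happens via hrtimer_cancel() plus __wake_up() on the iocg waitqueue in ioc_pd_offline(), as the diff below shows.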


-- 
2.39.2
 
 
Link:https://gitee.com/openeuler/kernel/pulls/984 

Reviewed-by: Hou Tao <houtao1@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
@@ -486,6 +486,7 @@ struct ioc_gq {
 	u32				inuse;
 
 	u32				last_inuse;
+	bool				online;
 	s64				saved_margin;
 
 	sector_t			cursor;		/* to detect randio */
@@ -702,6 +703,20 @@ static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
 			    struct ioc_cgrp, cpd);
 }
 
+static struct ioc_gq *ioc_bio_iocg(struct bio *bio)
+{
+	struct blkcg_gq *blkg = bio->bi_blkg;
+
+	if (blkg && blkg->online) {
+		struct ioc_gq *iocg = blkg_to_iocg(blkg);
+
+		if (iocg && iocg->online)
+			return iocg;
+	}
+
+	return NULL;
+}
+
 /*
  * Scale @abs_cost to the inverse of @hw_inuse.  The lower the hierarchical
  * weight, the more expensive each IO.  Must round up.
@@ -1218,6 +1233,9 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 
 	spin_lock_irq(&ioc->lock);
 
+	if (!iocg->online)
+		goto fail_unlock;
+
 	ioc_now(ioc, now);
 
 	/* update period */
@@ -1387,14 +1405,17 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
 {
 	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
 	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
-	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
 
-	ctx->vbudget -= cost;
-	if (ctx->vbudget < 0)
-		return -1;
+	if (ctx->iocg->online) {
+		u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
+
+		ctx->vbudget -= cost;
+		if (ctx->vbudget < 0)
+			return -1;
 
-	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
+		iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
+	}
 	wait->committed = true;
 
 	/*
@@ -2542,9 +2563,8 @@ static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
 
 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 {
-	struct blkcg_gq *blkg = bio->bi_blkg;
 	struct ioc *ioc = rqos_to_ioc(rqos);
-	struct ioc_gq *iocg = blkg_to_iocg(blkg);
+	struct ioc_gq *iocg = ioc_bio_iocg(bio);
 	struct ioc_now now;
 	struct iocg_wait wait;
 	u64 abs_cost, cost, vtime;
@@ -2678,7 +2698,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
 			   struct bio *bio)
 {
-	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
+	struct ioc_gq *iocg = ioc_bio_iocg(bio);
 	struct ioc *ioc = rqos_to_ioc(rqos);
 	sector_t bio_end = bio_end_sector(bio);
 	struct ioc_now now;
@@ -2736,7 +2756,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
 
 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
 {
-	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
+	struct ioc_gq *iocg = ioc_bio_iocg(bio);
 
 	if (iocg && bio->bi_iocost_cost)
 		atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
@@ -2939,6 +2959,7 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
 	ioc_now(ioc, &now);
 
 	iocg->ioc = ioc;
+	iocg->online = true;
 	atomic64_set(&iocg->vtime, now.vnow);
 	atomic64_set(&iocg->done_vtime, now.vnow);
 	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
@@ -2964,14 +2985,18 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
-static void ioc_pd_free(struct blkg_policy_data *pd)
+static void ioc_pd_offline(struct blkg_policy_data *pd)
 {
 	struct ioc_gq *iocg = pd_to_iocg(pd);
 	struct ioc *ioc = iocg->ioc;
 	unsigned long flags;
 
 	if (ioc) {
-		spin_lock_irqsave(&ioc->lock, flags);
+		struct iocg_wake_ctx ctx = { .iocg = iocg };
+
+		iocg_lock(iocg, true, &flags);
+		iocg->online = false;
 
 		if (!list_empty(&iocg->active_list)) {
 			struct ioc_now now;
@@ -2984,10 +3009,17 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
 		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
 		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
 
-		spin_unlock_irqrestore(&ioc->lock, flags);
+		iocg_unlock(iocg, true, &flags);
 
 		hrtimer_cancel(&iocg->waitq_timer);
+		__wake_up(&iocg->waitq, TASK_NORMAL, 0, &ctx);
 	}
+}
+
+static void ioc_pd_free(struct blkg_policy_data *pd)
+{
+	struct ioc_gq *iocg = pd_to_iocg(pd);
+
 	free_percpu(iocg->pcpu_stat);
 	kfree(iocg);
 }
@@ -3468,6 +3500,7 @@ static struct blkcg_policy blkcg_policy_iocost = {
 	.cpd_free_fn	= ioc_cpd_free,
 	.pd_alloc_fn	= ioc_pd_alloc,
 	.pd_init_fn	= ioc_pd_init,
+	.pd_offline_fn	= ioc_pd_offline,
 	.pd_free_fn	= ioc_pd_free,
 	.pd_stat_fn	= ioc_pd_stat,
 };