diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f0abb8fe0ae967ebd10e02beeed04cb153132bf9..ac4229b00a78c68ba15ad85e04fb9f5966654fa3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4699,6 +4699,11 @@ static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
 	if (runtime_refresh_within(cfs_b, min_left))
 		return;
 
+	/* don't push forwards an existing deferred unthrottle */
+	if (cfs_b->slack_started)
+		return;
+	cfs_b->slack_started = true;
+
 	hrtimer_start(&cfs_b->slack_timer,
 			ns_to_ktime(cfs_bandwidth_slack_period),
 			HRTIMER_MODE_REL);
@@ -4749,6 +4754,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
 	/* confirm we're still not at a refresh boundary */
 	raw_spin_lock(&cfs_b->lock);
+	cfs_b->slack_started = false;
 	if (cfs_b->distribute_running) {
 		raw_spin_unlock(&cfs_b->lock);
 		return;
@@ -4917,6 +4923,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
 	cfs_b->distribute_running = 0;
+	cfs_b->slack_started = false;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b3d4b61dbbe54ca6e21019d78f3971637bf112d9..0a0382f8e509914901a4bfb3fa1d86207b0455a6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -337,8 +337,10 @@ struct cfs_bandwidth {
 	u64			runtime;
 	s64			hierarchical_quota;
 
-	short			idle;
-	short			period_active;
+	u8			idle;
+	u8			period_active;
+	u8			distribute_running;
+	u8			slack_started;
 	struct hrtimer		period_timer;
 	struct hrtimer		slack_timer;
 	struct list_head	throttled_cfs_rq;
@@ -347,8 +349,6 @@ struct cfs_bandwidth {
 	int			nr_periods;
 	int			nr_throttled;
 	u64			throttled_time;
-
-	bool			distribute_running;
 #endif
 };
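
For reference, below is a standalone userspace sketch (not kernel code, not part of the patch) of the guard pattern the patch introduces: a flag set under a lock before arming a one-shot deferred callback, and cleared by the callback itself, so a burst of requests arms the work exactly once rather than re-arming it each time. The names here (deferred_ctx, request_deferred_work, deferred_worker) and the pthread/usleep machinery are made-up stand-ins for cfs_b->lock, the slack hrtimer, and do_sched_cfs_slack_timer().

/*
 * Illustration only: arm a deferred callback at most once until it runs.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct deferred_ctx {
	pthread_mutex_t lock;
	bool started;		/* plays the role of cfs_b->slack_started */
};

static void *deferred_worker(void *arg)
{
	struct deferred_ctx *ctx = arg;

	usleep(5000);		/* stand-in for the 5ms slack timer delay */

	pthread_mutex_lock(&ctx->lock);
	ctx->started = false;	/* like the timer handler clearing the flag */
	pthread_mutex_unlock(&ctx->lock);

	printf("deferred work ran\n");
	return NULL;
}

/* Called many times in a burst; must arm the worker at most once. */
static void request_deferred_work(struct deferred_ctx *ctx)
{
	pthread_t tid;

	pthread_mutex_lock(&ctx->lock);
	if (ctx->started) {	/* don't re-arm an already pending request */
		pthread_mutex_unlock(&ctx->lock);
		return;
	}
	ctx->started = true;
	pthread_mutex_unlock(&ctx->lock);

	pthread_create(&tid, NULL, deferred_worker, ctx);
	pthread_detach(tid);
}

int main(void)
{
	struct deferred_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < 100; i++)
		request_deferred_work(&ctx);

	sleep(1);
	return 0;
}

Since the 100-request burst finishes well inside the 5 ms delay, only one worker is armed and the message prints once; without the started check, each request would spawn (or, in the hrtimer case, push forward) its own deferred work.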