diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index affd97ec9f65a0c1b9751ee9f1d39cf77757b0aa..686ec8adf952fbd3767c652515089d92b3827ae0 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1394,6 +1394,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(later_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
+				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
 				double_unlock_balance(rq, later_rq);
 				later_rq = NULL;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fe30e66aff1db44d58ec96cbee332a78257e4d3..40748dc8ea3e957de3c5edfd79c5011bdd813e7b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3030,7 +3030,14 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct rq *rq = rq_of(cfs_rq);
+
+	cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c41ea7ac1764b831fd015531c3d8f40d445bf8ae..ec4f538d4396beb20e2656923995438d5093f667 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1729,6 +1729,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
+				     !rt_task(task) ||
 				     !task_on_rq_queued(task))) {
 
 				double_unlock_balance(rq, lowest_rq);
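The deadline.c and rt.c hunks harden the same pattern: double_lock_balance() may have to drop the current runqueue's lock in order to take both locks in the proper order, and while the lock is dropped another CPU can change the task's scheduling class (e.g. via sched_setscheduler()), so the class must be re-validated together with the other conditions once both locks are held. A minimal userspace sketch of that re-validation pattern follows, using pthreads and hypothetical names (double_lock(), lock_target_rq(), struct task); this is an illustration of the locking idiom, not kernel API:

	/* Illustrative sketch only; names are hypothetical stand-ins for
	 * the kernel's double_lock_balance()/find_lock_*_rq() logic. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct task {
		int policy;	/* may be changed concurrently by another thread */
		bool queued;
	};

	/* Acquire both locks in a fixed (address) order; the caller already
	 * holds 'cur'. Returns true if 'cur' had to be dropped, i.e. a race
	 * window was open and the caller's earlier checks are now stale. */
	static bool double_lock(pthread_mutex_t *cur, pthread_mutex_t *target)
	{
		if (pthread_mutex_trylock(target) == 0)
			return false;			/* got it, no window */
		pthread_mutex_unlock(cur);		/* drop, then retake in order */
		if ((uintptr_t)cur < (uintptr_t)target) {
			pthread_mutex_lock(cur);
			pthread_mutex_lock(target);
		} else {
			pthread_mutex_lock(target);
			pthread_mutex_lock(cur);
		}
		return true;				/* state may have changed */
	}

	/* Caller holds 'cur'. Mirrors the patched condition: if the lock was
	 * dropped, re-check the task's class and queued state before trusting
	 * it, just as the patch adds !dl_task()/!rt_task() alongside
	 * !task_on_rq_queued(). */
	static bool lock_target_rq(struct task *task, int expected_policy,
				   pthread_mutex_t *cur, pthread_mutex_t *target)
	{
		if (double_lock(cur, target) &&
		    (task->policy != expected_policy || !task->queued)) {
			pthread_mutex_unlock(target);
			return false;			/* bail, like later_rq = NULL */
		}
		return true;				/* both locks held, state valid */
	}

The point mirrored from the patch is that any conclusion reached before the lock was dropped (scheduling class, queued state, affinity) must be recomputed under both locks; checking only queued state, as the pre-patch code did, misses a concurrent class change.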