diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f7c6ed2fd69ddb7a25a3d89edb10300eaa3b8af4..5536397a0309cf69f60782d4303844662716c618 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3033,7 +3033,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
-		p->sched_class->put_prev_task(rq, p);
+		put_prev_task(rq, p);
 
 	/*
 	 * Boosting condition are:
@@ -3586,7 +3586,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
-		p->sched_class->put_prev_task(rq, p);
+		put_prev_task(rq, p);
 
 	prev_class = p->sched_class;
 	__setscheduler(rq, p, attr);
@@ -4792,7 +4792,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
-		p->sched_class->put_prev_task(rq, p);
+		put_prev_task(rq, p);
 
 	p->numa_preferred_nid = nid;
 
@@ -7374,7 +7374,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (queued)
 		dequeue_task(rq, tsk, 0);
 	if (unlikely(running))
-		tsk->sched_class->put_prev_task(rq, tsk);
+		put_prev_task(rq, tsk);
 
 	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
 			  lockdep_is_held(&tsk->sighand->siglock)),
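
Each hunk replaces an open-coded indirect call through the task's sched_class with the put_prev_task() wrapper, so all call sites go through one helper. For context, a minimal sketch of what that helper is assumed to look like (in kernel/sched/sched.h; the exact definition in the tree this patch targets may differ):

/*
 * Assumed shape of the wrapper used above: it simply forwards to the
 * task's scheduling class, so behaviour is unchanged. The refactor only
 * removes the repeated p->sched_class-> indirection at each call site.
 */
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}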