diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 66209d68845664f7e1cb8b6ea40b46da19e00b86..cfaf2b18f28a0b21f15b4cb17e68d2741e49adc0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -416,7 +416,7 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Note: must be called with a freshly updated rq->fair_clock.
  */
 static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
+__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	unsigned long delta_fair = se->delta_fair_run;
 
@@ -441,7 +441,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 
 	se->delta_fair_run += delta_fair;
 	if (unlikely(abs(se->delta_fair_run) >= sysctl_sched_stat_granularity)) {
-		__update_stats_wait_end(cfs_rq, se, now);
+		__update_stats_wait_end(cfs_rq, se);
 		se->delta_fair_run = 0;
 	}
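
For context, below is a minimal standalone C sketch of the same refactoring pattern the two hunks apply: a static helper whose trailing timestamp parameter is never read loses that parameter, and its call site is updated to match. The names here (demo_entity, flush_delta, account_delta) are illustrative placeholders, not identifiers from sched_fair.c.

/*
 * Illustrative sketch only -- not kernel code. It mirrors the hunks
 * above: a helper whose 'now' argument was never read drops the
 * parameter, and its caller drops the matching argument.
 */
#include <stdio.h>

struct demo_entity {
	long delta_run;
};

/* Before the change this was: flush_delta(struct demo_entity *se, unsigned long long now) */
static void flush_delta(struct demo_entity *se)
{
	printf("flushing accumulated delta %ld\n", se->delta_run);
	se->delta_run = 0;
}

static void account_delta(struct demo_entity *se, long delta)
{
	se->delta_run += delta;
	if (se->delta_run >= 100)
		flush_delta(se);	/* the 'now' argument disappears here as well */
}

int main(void)
{
	struct demo_entity e = { 0 };

	account_delta(&e, 150);
	return 0;
}

Because the removed parameter had no reads inside the helper, behaviour is unchanged; only the signature and the call sites shrink.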