diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index de98e2e9d6e1214314e0a513eb33f543043b35db..97682f925ed5cce033069496412755d332ac4fff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -738,7 +738,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		vruntime += sched_vslice(cfs_rq, se);
 
 	/* sleeps up to a single latency don't count. */
-	if (!initial && sched_feat(FAIR_SLEEPERS)) {
+	if (!initial) {
 		unsigned long thresh = sysctl_sched_latency;
 
 		/*
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 404288354aeeb601fae8950f04746f0f10acf1fb..850f9809cf81231bbcf9617a469a012c848c1d82 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,10 +1,3 @@
-/*
- * Disregards a certain amount of sleep time (sched_latency_ns) and
- * considers the task to be running during that period. This gives it
- * a service deficit on wakeup, allowing it to run sooner.
- */
-SCHED_FEAT(FAIR_SLEEPERS, 1)
-
 /*
  * Only give sleepers 50% of their service deficit. This allows
  * them to run sooner, but does not allow tons of sleepers to
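
Net effect of the patch: the sleeper credit in place_entity() is no longer gated on a feature bit and is applied on every wakeup placement; GENTLE_FAIR_SLEEPERS remains the only knob, halving the credit. Below is a minimal userspace sketch of that surviving logic, not the kernel function itself; the place_wakeup() helper, the main() driver, and the 6ms latency value (the base default of this era, before CPU-count scaling) are illustrative assumptions.

#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

/* Base sysctl_sched_latency (assumed 6ms; scaled by CPU count in practice). */
static unsigned long long sysctl_sched_latency = 6 * NSEC_PER_MSEC;

/* Mirrors SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1): halve the sleep credit. */
static int gentle_fair_sleepers = 1;

/*
 * Hypothetical stand-in for the !initial branch of place_entity():
 * after this patch every woken task receives the credit; only its
 * size is still tunable.
 */
static unsigned long long place_wakeup(unsigned long long min_vruntime)
{
	unsigned long long thresh = sysctl_sched_latency;

	/* Halve the effect of sleep time, for a gentler sleeper boost. */
	if (gentle_fair_sleepers)
		thresh >>= 1;

	/* The service deficit lets the sleeper run sooner, but caps the
	 * boost at one (half) latency period, so a horde of sleepers
	 * cannot monopolize the CPU. */
	return min_vruntime - thresh;
}

int main(void)
{
	unsigned long long min_vruntime = 100 * NSEC_PER_MSEC;

	printf("gentle credit: vruntime = %llu\n", place_wakeup(min_vruntime));
	gentle_fair_sleepers = 0;
	printf("full credit:   vruntime = %llu\n", place_wakeup(min_vruntime));
	return 0;
}

With the default feature set, the woken task is placed half a latency period behind min_vruntime; disabling GENTLE_FAIR_SLEEPERS doubles that deficit, which is exactly the "run sooner" behavior the removed FAIR_SLEEPERS comment described.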