diff --git a/include/linux/sched.h b/include/linux/sched.h
index d086cf0ca2c78d0e6c66501d669f1340fa8527f6..e5b8cbc4b8d66f131ffefaca3bbed7c2f82e2cd9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -606,19 +606,18 @@ struct task_cputime_atomic {
 #endif
 
 /*
- * Disable preemption until the scheduler is running.
- * Reset by start_kernel()->sched_init()->init_idle().
+ * Disable preemption until the scheduler is running -- use an unconditional
+ * value so that it also works on !PREEMPT_COUNT kernels.
  *
- * We include PREEMPT_ACTIVE to avoid cond_resched() from working
- * before the scheduler is active -- see should_resched().
+ * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
  */
-#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
+#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic: atomic thread group interval timers.
  * @running: non-zero when there are timers running and
- *		@cputime receives updates.
+ *	      @cputime receives updates.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.