diff --git a/kernel/sched.c b/kernel/sched.c
index 54fa282657cc098cf47b1dd6e3bfc2fc9315410b..19c0d5d16fef1b84c310a54ff0f0869072f7d2a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -359,7 +359,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
  */
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 	__acquires(rq->lock)
 {
 	struct runqueue *rq;
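
Note for reviewers (not part of the patch): the hunk above ends at the first line of the function body, so only the declaration change is visible. For context, a minimal sketch of how task_rq_lock() reads in kernels of this vintage is given below; the lock-and-recheck loop is an assumption based on the comment about ordering, not text from this diff. Interrupts are disabled before the runqueue lookup, so the task cannot migrate between task_rq() and spin_lock(); the recheck catches a migration that raced ahead of local_irq_save(), which is why no explicit preempt_disable() is needed.

static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
	__acquires(rq->lock)
{
	struct runqueue *rq;

repeat_lock_task:
	/* IRQs off first: after this point this CPU cannot reschedule. */
	local_irq_save(*flags);
	rq = task_rq(p);
	spin_lock(&rq->lock);
	/* The task may have migrated before we took the lock; recheck. */
	if (unlikely(rq != task_rq(p))) {
		spin_unlock_irqrestore(&rq->lock, *flags);
		goto repeat_lock_task;
	}
	return rq;
}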