diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4d69360a3f0616c6bf2d19339d271bdcd7fbfb84..0a2138a3c8f1cb194337754014447be6a26c13cc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -177,12 +177,37 @@ int sysctl_sched_rt_runtime = 950000;
 
 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 {
-	raw_spin_lock_nested(rq_lockp(rq), subclass);
+	raw_spinlock_t *lock;
+
+	if (sched_core_disabled()) {
+		raw_spin_lock_nested(&rq->__lock, subclass);
+		return;
+	}
+
+	for (;;) {
+		lock = rq_lockp(rq);
+		raw_spin_lock_nested(lock, subclass);
+		if (likely(lock == rq_lockp(rq)))
+			return;
+		raw_spin_unlock(lock);
+	}
 }
 
 bool raw_spin_rq_trylock(struct rq *rq)
 {
-	return raw_spin_trylock(rq_lockp(rq));
+	raw_spinlock_t *lock;
+	bool ret;
+
+	if (sched_core_disabled())
+		return raw_spin_trylock(&rq->__lock);
+
+	for (;;) {
+		lock = rq_lockp(rq);
+		ret = raw_spin_trylock(lock);
+		if (!ret || (likely(lock == rq_lockp(rq))))
+			return ret;
+		raw_spin_unlock(lock);
+	}
 }
 
 void raw_spin_rq_unlock(struct rq *rq)
@@ -190,6 +215,25 @@ void raw_spin_rq_unlock(struct rq *rq)
 	raw_spin_unlock(rq_lockp(rq));
 }
 
+#ifdef CONFIG_SMP
+/*
+ * double_rq_lock - safely lock two runqueues
+ */
+void double_rq_lock(struct rq *rq1, struct rq *rq2)
+{
+	lockdep_assert_irqs_disabled();
+
+	if (rq_order_less(rq2, rq1))
+		swap(rq1, rq2);
+
+	raw_spin_rq_lock(rq1);
+	if (rq_lockp(rq1) == rq_lockp(rq2))
+		return;
+
+	raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+}
+#endif
+
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4aed3a7923a25c1a2c0483e2a18a270720afd4b1..ec6be3e11d60dabbffaa1d0ccfb57a0df9474334 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1151,6 +1151,11 @@ enum task_qos_level {
 void init_qos_hrtimer(int cpu);
 #endif
 
+static inline bool sched_core_disabled(void)
+{
+	return true;
+}
+
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
 	return &rq->__lock;
@@ -2203,10 +2208,17 @@ unsigned long arch_scale_freq_capacity(int cpu)
 }
 #endif
 
+
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPTION
 
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
+static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
+{
+	return rq1->cpu < rq2->cpu;
+}
+
+extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
+#ifdef CONFIG_PREEMPTION
 
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
@@ -2246,14 +2258,13 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	if (likely(raw_spin_rq_trylock(busiest)))
 		return 0;
 
-	if (rq_lockp(busiest) >= rq_lockp(this_rq)) {
+	if (rq_order_less(this_rq, busiest)) {
 		raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
 		return 0;
 	}
 
 	raw_spin_rq_unlock(this_rq);
-	raw_spin_rq_lock(busiest);
-	raw_spin_rq_lock_nested(this_rq, SINGLE_DEPTH_NESTING);
+	double_rq_lock(this_rq, busiest);
 
 	return 1;
 }
@@ -2305,31 +2316,6 @@ static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
 	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
-/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
-	__acquires(rq1->lock)
-	__acquires(rq2->lock)
-{
-	BUG_ON(!irqs_disabled());
-	if (rq_lockp(rq1) == rq_lockp(rq2)) {
-		raw_spin_rq_lock(rq1);
-		__acquire(rq2->lock);	/* Fake it out ;) */
-	} else {
-		if (rq_lockp(rq1) < rq_lockp(rq2)) {
-			raw_spin_rq_lock(rq1);
-			raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
-		} else {
-			raw_spin_rq_lock(rq2);
-			raw_spin_rq_lock_nested(rq1, SINGLE_DEPTH_NESTING);
-		}
-	}
-}
-
 /*
  * double_rq_unlock - safely unlock two runqueues
  *
@@ -2340,11 +2326,11 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	raw_spin_rq_unlock(rq1);
 	if (rq_lockp(rq1) != rq_lockp(rq2))
 		raw_spin_rq_unlock(rq2);
 	else
 		__release(rq2->lock);
+	raw_spin_rq_unlock(rq1);
 }
 
 extern void set_rq_online (struct rq *rq);
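For readers following the patch, below is a minimal user-space sketch (plain C with pthreads, not kernel code) of the two ideas the diff introduces: re-validating rq_lockp() after acquiring the lock, since the pointer a runqueue maps to can change while we were waiting, and ordering double_rq_lock() by CPU number rather than by lock address so both lock orderings agree. The identifiers fake_rq, lock_rq, unlock_rq and double_lock_rq are hypothetical stand-ins, not the kernel API; the real code additionally has the sched_core_disabled() fast path and lockdep nesting annotations, which are omitted here.

/*
 * Sketch only: models the "read pointer, lock, re-check, retry" pattern
 * and the cpu-ordered double lock from the patch with pthread mutexes.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_rq {
	int cpu;
	pthread_mutex_t __lock;			/* per-rq lock */
	_Atomic(pthread_mutex_t *) lock_ptr;	/* lock currently covering this rq */
};

/* Analogue of rq_lockp(): which lock currently protects this rq. */
static pthread_mutex_t *rq_lockp(struct fake_rq *rq)
{
	return atomic_load(&rq->lock_ptr);
}

/* Analogue of raw_spin_rq_lock(): acquire, then re-validate the pointer. */
static void lock_rq(struct fake_rq *rq)
{
	pthread_mutex_t *lock;

	for (;;) {
		lock = rq_lockp(rq);
		pthread_mutex_lock(lock);
		if (lock == rq_lockp(rq))	/* pointer did not change under us */
			return;
		pthread_mutex_unlock(lock);	/* it did: drop and retry */
	}
}

static void unlock_rq(struct fake_rq *rq)
{
	pthread_mutex_unlock(rq_lockp(rq));
}

/*
 * Analogue of double_rq_lock(): take the lower-numbered CPU first and
 * skip the second acquisition when both rqs share one (core-wide) lock.
 */
static void double_lock_rq(struct fake_rq *rq1, struct fake_rq *rq2)
{
	if (rq2->cpu < rq1->cpu) {		/* rq_order_less() analogue */
		struct fake_rq *tmp = rq1;
		rq1 = rq2;
		rq2 = tmp;
	}

	lock_rq(rq1);
	if (rq_lockp(rq1) == rq_lockp(rq2))	/* same lock: already held */
		return;
	lock_rq(rq2);
}

int main(void)
{
	struct fake_rq a = { .cpu = 0 }, b = { .cpu = 1 };

	pthread_mutex_init(&a.__lock, NULL);
	pthread_mutex_init(&b.__lock, NULL);
	atomic_init(&a.lock_ptr, &a.__lock);
	atomic_init(&b.lock_ptr, &b.__lock);

	double_lock_rq(&a, &b);
	printf("both runqueues locked in cpu order\n");
	unlock_rq(&b);
	unlock_rq(&a);
	return 0;
}

The same reasoning explains the double_rq_unlock() hunk: once locking is ordered by CPU and rq1 may share its lock with rq2, rq1's lock must be released last, after the (possibly distinct) rq2 lock has been dropped.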