提交 5cfb6de7 编写于 作者: T Thomas Gleixner 提交者: Linus Torvalds

[PATCH] hrtimers: clean up callback tracking

Reintroduce ktimers feature "optimized away" by the ktimers review process:
remove the curr_timer pointer from the cpu-base and use the hrtimer state.

No functional changes.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 303e967f
...@@ -136,7 +136,6 @@ struct hrtimer_cpu_base { ...@@ -136,7 +136,6 @@ struct hrtimer_cpu_base {
spinlock_t lock; spinlock_t lock;
struct lock_class_key lock_key; struct lock_class_key lock_key;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
struct hrtimer *curr_timer;
}; };
/* /*
......
...@@ -172,8 +172,6 @@ static inline int hrtimer_callback_running(struct hrtimer *timer) ...@@ -172,8 +172,6 @@ static inline int hrtimer_callback_running(struct hrtimer *timer)
*/ */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define set_curr_timer(b, t) do { (b)->curr_timer = (t); } while (0)
/* /*
* We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
* means that all timers which are tied to this base via timer->base are * means that all timers which are tied to this base via timer->base are
...@@ -227,7 +225,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base) ...@@ -227,7 +225,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
* completed. There is no conflict as we hold the lock until * completed. There is no conflict as we hold the lock until
* the timer is enqueued. * the timer is enqueued.
*/ */
if (unlikely(base->cpu_base->curr_timer == timer)) if (unlikely(timer->state & HRTIMER_STATE_CALLBACK))
return base; return base;
/* See the comment in lock_timer_base() */ /* See the comment in lock_timer_base() */
...@@ -241,8 +239,6 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base) ...@@ -241,8 +239,6 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
#else /* CONFIG_SMP */ #else /* CONFIG_SMP */
#define set_curr_timer(b, t) do { } while (0)
static inline struct hrtimer_clock_base * static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{ {
...@@ -670,7 +666,6 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, ...@@ -670,7 +666,6 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
break; break;
fn = timer->function; fn = timer->function;
set_curr_timer(cpu_base, timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK); __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK);
spin_unlock_irq(&cpu_base->lock); spin_unlock_irq(&cpu_base->lock);
...@@ -684,7 +679,6 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, ...@@ -684,7 +679,6 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
enqueue_hrtimer(timer, base); enqueue_hrtimer(timer, base);
} }
} }
set_curr_timer(cpu_base, NULL);
spin_unlock_irq(&cpu_base->lock); spin_unlock_irq(&cpu_base->lock);
} }
...@@ -871,8 +865,6 @@ static void migrate_hrtimers(int cpu) ...@@ -871,8 +865,6 @@ static void migrate_hrtimers(int cpu)
spin_lock(&old_base->lock); spin_lock(&old_base->lock);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
BUG_ON(old_base->curr_timer);
migrate_hrtimer_list(&old_base->clock_base[i], migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]); &new_base->clock_base[i]);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册