提交 c458b1d1 编写于 作者: A Anna-Maria Gleixner 提交者: Ingo Molnar

hrtimer: Prepare handling of hard and softirq based hrtimers

The softirq based hrtimer can utilize most of the existing hrtimers
functions, but needs to operate on a different data set.

Add an 'active_mask' parameter to various functions so the hard and soft bases
can be selected. Fixup the existing callers and hand in the ACTIVE_HARD
mask.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: keescook@chromium.org
Link: http://lkml.kernel.org/r/20171221104205.7269-28-anna-maria@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 98ecadd4
...@@ -59,6 +59,15 @@ ...@@ -59,6 +59,15 @@
#include "tick-internal.h" #include "tick-internal.h"
/*
 * Masks for selecting the soft and hard context timers from
 * cpu_base->active
 *
 * The hard clock bases occupy the low bit positions below
 * HRTIMER_BASE_MONOTONIC_SOFT; the soft bases occupy the same number of
 * bits directly above them, so the soft mask is simply the hard mask
 * shifted up by MASK_SHIFT.
 */
#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT) /* index of the first soft base */
#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1) /* all hard-context base bits */
#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT) /* all soft-context base bits */
#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
/* /*
* The timer bases: * The timer bases:
* *
...@@ -507,13 +516,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base, ...@@ -507,13 +516,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
return expires_next; return expires_next;
} }
static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) /*
* Recomputes cpu_base::*next_timer and returns the earliest expires_next but
* does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
*
* @active_mask must be one of:
* - HRTIMER_ACTIVE_ALL,
* - HRTIMER_ACTIVE_SOFT, or
* - HRTIMER_ACTIVE_HARD.
*/
static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
unsigned int active_mask)
{ {
unsigned int active = cpu_base->active_bases; unsigned int active;
ktime_t expires_next = KTIME_MAX; ktime_t expires_next = KTIME_MAX;
cpu_base->next_timer = NULL; cpu_base->next_timer = NULL;
active = cpu_base->active_bases & active_mask;
expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next); expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
return expires_next; return expires_next;
...@@ -553,7 +573,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) ...@@ -553,7 +573,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{ {
ktime_t expires_next; ktime_t expires_next;
expires_next = __hrtimer_get_next_event(cpu_base); expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
if (skip_equal && expires_next == cpu_base->expires_next) if (skip_equal && expires_next == cpu_base->expires_next)
return; return;
...@@ -1074,7 +1094,7 @@ u64 hrtimer_get_next_event(void) ...@@ -1074,7 +1094,7 @@ u64 hrtimer_get_next_event(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags); raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!__hrtimer_hres_active(cpu_base)) if (!__hrtimer_hres_active(cpu_base))
expires = __hrtimer_get_next_event(cpu_base); expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags); raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
...@@ -1248,10 +1268,10 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base, ...@@ -1248,10 +1268,10 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
} }
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now, static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
unsigned long flags) unsigned long flags, unsigned int active_mask)
{ {
struct hrtimer_clock_base *base; struct hrtimer_clock_base *base;
unsigned int active = cpu_base->active_bases; unsigned int active = cpu_base->active_bases & active_mask;
for_each_active_base(base, cpu_base, active) { for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *node; struct timerqueue_node *node;
...@@ -1314,10 +1334,10 @@ void hrtimer_interrupt(struct clock_event_device *dev) ...@@ -1314,10 +1334,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
*/ */
cpu_base->expires_next = KTIME_MAX; cpu_base->expires_next = KTIME_MAX;
__hrtimer_run_queues(cpu_base, now, flags); __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
/* Reevaluate the clock bases for the next expiry */ /* Reevaluate the clock bases for the next expiry */
expires_next = __hrtimer_get_next_event(cpu_base); expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
/* /*
* Store the new expiry value so the migration code can verify * Store the new expiry value so the migration code can verify
* against it. * against it.
...@@ -1421,7 +1441,7 @@ void hrtimer_run_queues(void) ...@@ -1421,7 +1441,7 @@ void hrtimer_run_queues(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags); raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base); now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now, flags); __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags); raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册