Commit a59855cd authored by Rafael J. Wysocki

time: hrtimer: Introduce hrtimer_next_event_without()

The next set of changes will need to compute the time to the next
hrtimer event over all hrtimers except for the scheduler tick one.

To that end introduce a new helper function,
hrtimer_next_event_without(), for computing the time until the next
hrtimer event over all timers except for one and modify the underlying
code in __hrtimer_next_event_base() to prepare it for being called by
that new function.

No intentional changes in functionality.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Parent 23a8d888
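
For orientation before the diff: a minimal, hypothetical sketch of how the follow-up changes might call the new helper. The tick_timer variable and the wrapper function below are illustrative placeholders, not part of this commit.

/*
 * Illustrative sketch only: ask for the next hrtimer expiry on this
 * CPU while ignoring the caller's own timer. "tick_timer" and
 * next_event_ignoring_tick() are placeholder names, not introduced
 * by this commit.
 */
#include <linux/hrtimer.h>

static struct hrtimer tick_timer;

static u64 next_event_ignoring_tick(void)
{
        /* KTIME_MAX means no other timer is pending on this CPU. */
        return hrtimer_next_event_without(&tick_timer);
}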
@@ -426,6 +426,7 @@ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 }
 
 extern u64 hrtimer_get_next_event(void);
+extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
 
 extern bool hrtimer_active(const struct hrtimer *timer);
 
...
@@ -490,6 +490,7 @@ __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
        while ((base = __next_base((cpu_base), &(active))))
 
 static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+                                        const struct hrtimer *exclude,
                                          unsigned int active,
                                          ktime_t expires_next)
 {
@@ -502,9 +503,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 
                next = timerqueue_getnext(&base->active);
                timer = container_of(next, struct hrtimer, node);
+               if (timer == exclude) {
+                       /* Get to the next timer in the queue. */
+                       struct rb_node *rbn = rb_next(&next->node);
+
+                       next = rb_entry_safe(rbn, struct timerqueue_node, node);
+                       if (!next)
+                               continue;
+
+                       timer = container_of(next, struct hrtimer, node);
+               }
                expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
                if (expires < expires_next) {
                        expires_next = expires;
+
+                       /* Skip cpu_base update if a timer is being excluded. */
+                       if (exclude)
+                               continue;
+
                        if (timer->is_soft)
                                cpu_base->softirq_next_timer = timer;
                        else
@@ -548,7 +564,8 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
        if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
                active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
                cpu_base->softirq_next_timer = NULL;
-               expires_next = __hrtimer_next_event_base(cpu_base, active, KTIME_MAX);
+               expires_next = __hrtimer_next_event_base(cpu_base, NULL,
+                                                        active, KTIME_MAX);
 
                next_timer = cpu_base->softirq_next_timer;
        }
@@ -556,7 +573,8 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
        if (active_mask & HRTIMER_ACTIVE_HARD) {
                active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
                cpu_base->next_timer = next_timer;
-               expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+               expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
+                                                        expires_next);
        }
 
        return expires_next;
@@ -1202,6 +1220,39 @@ u64 hrtimer_get_next_event(void)
        return expires;
 }
 
+/**
+ * hrtimer_next_event_without - time until next expiry event w/o one timer
+ * @exclude:   timer to exclude
+ *
+ * Returns the next expiry time over all timers except for the @exclude one or
+ * KTIME_MAX if none of them is pending.
+ */
+u64 hrtimer_next_event_without(const struct hrtimer *exclude)
+{
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+       u64 expires = KTIME_MAX;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+       if (__hrtimer_hres_active(cpu_base)) {
+               unsigned int active;
+
+               if (!cpu_base->softirq_activated) {
+                       active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
+                       expires = __hrtimer_next_event_base(cpu_base, exclude,
+                                                           active, KTIME_MAX);
+               }
+
+               active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
+               expires = __hrtimer_next_event_base(cpu_base, exclude, active,
+                                                   expires);
+       }
+
+       raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
+       return expires;
+}
+
 #endif
 
 static inline int hrtimer_clockid_to_base(clockid_t clock_id)
...
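
A note on the design of the exclude path, as visible in the diff above: each clock base keeps its armed timers sorted by expiry, so only the head of the timerqueue can determine the next event. When the head is the excluded timer, the rb_next()/rb_entry_safe() step falls back to the second entry, and a base whose only armed timer is the excluded one is skipped. When exclude is set, __hrtimer_next_event_base() also leaves cpu_base->next_timer and cpu_base->softirq_next_timer untouched, because those caches must keep describing the real next event, including the excluded timer.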