/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;

	new_cpu_base = &__get_cpu_var(hrtimer_bases);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG < 64 */
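
/*
 * A worked example for the divisor reduction in ktime_divns() above
 * (numbers are illustrative): for div = 2^33 the loop shifts twice,
 * leaving sft = 2 and div = 2^31, so the call computes
 * (dclc >> 2) / 2^31, which equals dclc / 2^33 exactly. For divisors
 * that are not a power of two, the shifted-off low bits of div
 * introduce a tiny relative error (below 2^-31), which is acceptable
 * for the nanosecond-scale values involved.
 */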

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}
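
/*
 * For instance (illustrative): if lhs is close to KTIME_MAX, the
 * ktime_add() above wraps tv64 negative; the check catches the wrap
 * and clamps the result to KTIME_SEC_MAX seconds, so callers get a
 * saturated timeout instead of one that lies in the past.
 */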

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly  = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
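
/*
 * For example, booting with "highres=off" on the kernel command line
 * keeps hrtimers in low resolution mode even when the hardware could
 * support high resolution timers.
 */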

/*
 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source, checking both queues for the next event.
 * Called with interrupts disabled and base->lock held.
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback, or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * A CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * with that; just avoid calling into the tick code, which now
	 * has objections against negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}


/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}

/*
 * When high resolution timers are active, try to reprogram. Note that in
 * case the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
 * expiry check happens. The timer gets enqueued into the rbtree. The
 * reprogramming and expiry check is done in the hrtimer_interrupt or in
 * the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		spin_unlock(&base->cpu_base->lock);
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
		spin_lock(&base->cpu_base->lock);
		return 1;
	}
	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
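
/*
 * Typical use in a periodic timer callback; a sketch, with my_timer_fn
 * and the 100ms period being illustrative names and values, not part
 * of this file:
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
 *	{
 *		ktime_t period = ktime_set(0, 100 * NSEC_PER_MSEC);
 *
 *		hrtimer_forward(timer, timer->base->get_time(), period);
 *		return HRTIMER_RESTART;
 *	}
 *
 * hrtimer_forward() pushes the expiry past "now" in whole intervals,
 * so a callback that ran late does not fire back-to-back to catch up;
 * the returned overrun count tells the caller how many periods were
 * skipped.
 */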

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	debug_hrtimer_activate(timer);

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (hrtimer_get_expires_tv64(timer) <
				hrtimer_get_expires_tv64(entry)) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost)
		base->first = &timer->node;

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return leftmost;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by
 * setting reprogram to zero. This is useful when the context does a
 * reprogramming anyway (e.g. timer interrupt).
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	if (timer->state & HRTIMER_STATE_ENQUEUED) {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_hrtimer_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
			const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
		hrtimer_enqueue_reprogram(timer, new_base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return hrtimer_start_range_ns(timer, tim, 0, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_start);

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	INIT_LIST_HEAD(&timer->cb_entry);
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
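
/*
 * A minimal usage sketch (my_timer and my_timer_fn are illustrative
 * names, not part of this file): initialize a timer on the monotonic
 * clock, set its callback and arm it 50ms from now:
 *
 *	static struct hrtimer my_timer;
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timer_fn;
 *	hrtimer_start(&my_timer, ktime_set(0, 50 * NSEC_PER_MSEC),
 *		      HRTIMER_MODE_REL);
 *
 * The callback runs with interrupts disabled (see __run_hrtimer below)
 * and must return HRTIMER_NORESTART unless it has forwarded the timer
 * itself.
 */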

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void __run_hrtimer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_hrtimer_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	spin_unlock(&cpu_base->lock);
	restart = fn(timer);
	spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. That happens either
	 * in hrtimer_start_range_ns() or in hrtimer_interrupt().
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

static int force_clock_reprogram;

/*
 * After 5 attempts, we consider that hrtimer_interrupt() is hanging,
 * which can happen when something slows down the interrupt, such as
 * tracing. Then we force clock reprogramming for each future hrtimer
 * interrupt to avoid infinite loops, overwriting the min_delta_ns
 * threshold.
 * The next tick event will be scheduled to 3 times the time we
 * currently spend in hrtimer_interrupt(). This is a good compromise:
 * the cpus will spend 1/4 of their time processing hrtimer interrupts,
 * which is enough to let them run without serious starvation.
 */

static inline void
hrtimer_interrupt_hanging(struct clock_event_device *dev,
			ktime_t try_time)
{
	force_clock_reprogram = 1;
	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
	printk(KERN_WARNING "hrtimer: interrupt too slow, "
		"forcing clock min delta to %lu ns\n", dev->min_delta_ns);
}
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int nr_retries = 0;
	int i;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	/* 5 retries is enough to notice a hang */
	if (!(++nr_retries % 5))
		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));

	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals, and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, force_clock_reprogram))
			goto retry;
	}
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

/**
 * hrtimer_peek_ahead_timers -- run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expires time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 *
 */
void hrtimer_peek_ahead_timers(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__hrtimer_peek_ahead_timers();
	local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	hrtimer_peek_ahead_timers();
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif	/* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();
}

/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct rb_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];

		if (!base->first)
			continue;

		if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		spin_lock(&cpu_base->lock);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <=
					hrtimer_get_expires_tv64(timer))
				break;

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
	}
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
				HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	unsigned long slack;

	slack = current->timer_slack_ns;
	if (rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.index = t.timer.base->index;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_hrtimer_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}

static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	/* Check if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	{
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
		migrate_hrtimers(scpu);
		break;
	}
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			  (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (nanoseconds)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
			       const enum hrtimer_mode mode)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
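
/*
 * A usage sketch (values are illustrative): sleep for roughly 10ms
 * while giving the kernel 1ms of slack to coalesce wakeups:
 *
 *	ktime_t to = ktime_set(0, 10 * NSEC_PER_MSEC);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	schedule_hrtimeout_range(&to, NSEC_PER_MSEC, HRTIMER_MODE_REL);
 *
 * The task state must be set before calling, as the kerneldoc above
 * explains.
 */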

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);