/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006	    Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
static DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_REALTIME_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_MONOTONIC_RES,
		},
	}
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
#ifdef CONFIG_NO_HZ
		getnstimeofday(&xts);
#else
		xts = xtime;
#endif
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(wall_to_monotonic);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Helper function to check whether the timer is on one of the queues
 */
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_ENQUEUED;
}

/*
 * Helper function to check whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_CALLBACK;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;

	new_cpu_base = &__get_cpu_var(hrtimer_bases);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(timer->state & HRTIMER_STATE_CALLBACK))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

#define switch_hrtimer_base(t, b)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}
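/*
 * Illustrative example, not part of the original file: an nsec value
 * of a second or more is split into a (sec, nsec) pair before the
 * 64bit add, so adding 1500000000 ns to kt behaves like
 *
 *	ktime_add(kt, ktime_set(1, 500000000));
 */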

# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
static unsigned long ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc, inc, dns;
	int sft = 0;

	dclc = dns = ktime_to_ns(kt);
	inc = div;
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return (unsigned long) dclc;
}

#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)		(unsigned long)((kt).tv64 / (div))
#endif /* BITS_PER_LONG >= 64 */
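/*
 * Worked example (illustrative only): dividing a 2.5 second delta by
 * a one second interval yields an overrun count of 2:
 *
 *	ktime_t delta = ktime_set(2, NSEC_PER_SEC / 2);
 *	unsigned long orun = ktime_divns(delta, NSEC_PER_SEC);
 *
 * On 32bit, a divisor of 2^32 or more is shifted down together with
 * the dividend, so the quotient is then only an approximation.
 */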

/*
 * Timekeeping resumed notification
 */
void hrtimer_notify_resume(void)
{
	clock_was_set();
}

/*
 * Counterpart to lock_timer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	unsigned long orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, timer->expires);

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		timer->expires = ktime_add_ns(timer->expires, incr * orun);
		if (timer->expires.tv64 > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	timer->expires = ktime_add(timer->expires, interval);

	return orun;
}
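/*
 * Usage sketch (illustrative; my_callback and my_period are
 * hypothetical): a periodic timer callback typically forwards its own
 * expiry past "now" and lets the core re-enqueue it by returning
 * HRTIMER_RESTART:
 *
 *	static enum hrtimer_restart my_callback(struct hrtimer *timer)
 *	{
 *		hrtimer_forward(timer, timer->base->get_time(), my_period);
 *		return HRTIMER_RESTART;
 *	}
 */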

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (timer->expires.tv64 < entry->expires.tv64)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	if (!base->first || timer->expires.tv64 <
	    rb_entry(base->first, struct hrtimer, node)->expires.tv64)
		base->first = &timer->node;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate)
{
	/*
	 * Remove the timer from the rbtree and replace the
	 * first entry pointer if necessary.
	 */
	if (base->first == &timer->node)
		base->first = rb_next(&timer->node);
	rb_erase(&timer->node, &base->active);
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE);
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		tim = ktime_add(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add(tim, base->resolution);
#endif
	}
	timer->expires = tim;

	enqueue_hrtimer(timer, new_base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
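/*
 * Usage sketch (illustrative only; my_timer and my_callback are
 * hypothetical): arming a one-shot timeout 10ms from now on the
 * monotonic clock:
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_callback;
 *	hrtimer_start(&my_timer, ktime_set(0, 10000000), HRTIMER_MODE_REL);
 */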

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = ktime_sub(timer->expires, base->get_time());
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
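/*
 * Illustrative only (my_timer and handle_expired are hypothetical):
 * checking how much time is left on a queued timer; a result <= 0
 * means the timer has already expired:
 *
 *	ktime_t rem = hrtimer_get_remaining(&my_timer);
 *	if (rem.tv64 <= 0)
 *		handle_expired();
 */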

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;

		timer = rb_entry(base->first, struct hrtimer, node);
		delta.tv64 = timer->expires.tv64;
		delta = ktime_sub(delta, base->get_time());
		if (delta.tv64 < mindelta.tv64)
			mindelta.tv64 = delta.tv64;
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
				     int index)
{
	struct rb_node *node;
	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

	if (!base->first)
		return;

	if (base->get_softirq_time)
		base->softirq_time = base->get_softirq_time();

	spin_lock_irq(&cpu_base->lock);

	while ((node = base->first)) {
		struct hrtimer *timer;
		enum hrtimer_restart (*fn)(struct hrtimer *);
		int restart;

		timer = rb_entry(node, struct hrtimer, node);
		if (base->softirq_time.tv64 <= timer->expires.tv64)
			break;

		fn = timer->function;
		__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart != HRTIMER_NORESTART) {
			BUG_ON(hrtimer_active(timer));
			enqueue_hrtimer(timer, base);
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	int i;

	hrtimer_get_softirq_time(cpu_base);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		run_hrtimer_queue(cpu_base, i);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start(&t->timer, t->timer.expires, mode);

		schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	return t->task == NULL;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	struct timespec tu;
	ktime_t time;

	restart->fn = do_no_restart_syscall;

	hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
	t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
752 753
		return 0;

	rmtp = (struct timespec __user *) restart->arg1;
	if (rmtp) {
		time = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (time.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(time);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart->fn = hrtimer_nanosleep_restart;

	/* The other values in restart are already filled in */
	return -ERESTART_RESTARTBLOCK;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	struct timespec tu;
	ktime_t rem;

	hrtimer_init(&t.timer, clockid, mode);
	t.timer.expires = timespec_to_ktime(*rqtp);
	if (do_nanosleep(&t, mode))
		return 0;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS)
		return -ERESTARTNOHAND;

	if (rmtp) {
		rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
		if (rem.tv64 <= 0)
			return 0;
		tu = ktime_to_timespec(rem);
		if (copy_to_user(rmtp, &tu, sizeof(tu)))
			return -EFAULT;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->arg0 = (unsigned long) t.timer.base->index;
	restart->arg1 = (unsigned long) rmtp;
	restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
	restart->arg3 = t.timer.expires.tv64 >> 32;

	return -ERESTART_RESTARTBLOCK;
}

asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);
	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(timer->state & HRTIMER_STATE_CALLBACK);
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE);
		timer->base = new_base;
		enqueue_hrtimer(timer, new_base);
	}
}

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	local_irq_disable();

	spin_lock(&new_base->lock);
	spin_lock(&old_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}
	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			  (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
}