/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ lvl
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther away the expiry time is, the higher the array level and
 * therefore the coarser the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constants values are a good compromise between
 * array size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0	   0         3 ms                0 ms -        210 ms
 *  1	  64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2	 128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3	 192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4	 256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5	 320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6	 384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7	 448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0	   0         4 ms                0 ms -        255 ms
 *  1	  64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2	 128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3	 192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4	 256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5	 320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6	 384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7	 448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0	   0         10 ms               0 ms -        630 ms
 *  1	  64         80 ms             640 ms -       5110 ms (640ms - ~5s)
 *  2	 128        640 ms            5120 ms -      40950 ms (~5s - ~40s)
 *  3	 192       5120 ms (~5s)     40960 ms -     327670 ms (~40s - ~5m)
 *  4	 256      40960 ms (~40s)   327680 ms -    2621430 ms (~5m - ~43m)
 *  5	 320     327680 ms (~5m)   2621440 ms -   20971510 ms (~43m - ~5h)
 *  6	 384    2621440 ms (~43m) 20971520 ms -  167772150 ms (~5h - ~1d)
 *  7	 448   20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
#else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
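
/*
 * Worked example (illustration only, not part of the original file):
 * with HZ=1000 and the constants above, LVL_CLK_DIV is 8, so each
 * level is eight times coarser than the one below it:
 *
 *	LVL_GRAN(0) = 1 << 0 =  1 jiffy
 *	LVL_GRAN(1) = 1 << 3 =  8 jiffies
 *	LVL_GRAN(2) = 1 << 6 = 64 jiffies
 *
 * A timer whose delta to base->clk is at least LVL_START(n), i.e.
 * 63 << ((n - 1) * 3) = 63, 504, 4032, ... jiffies, is queued into
 * level n, matching the granularity/range tables above.
 */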

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif

struct timer_base {
	spinlock_t		lock;
	struct timer_list	*running_timer;
	unsigned long		clk;
	unsigned int		cpu;
	bool			migration_enabled;
	bool			nohz_active;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
	bool on = sysctl_timer_migration && tick_nohz_active;
	unsigned int cpu;

	/* Avoid the loop, if nothing to update */
	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
		return;

	for_each_possible_cpu(cpu) {
		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
		if (!update_nohz)
			continue;
		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
	}
}

int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration(false);
	mutex_unlock(&mutex);
	return ret;
}
#endif
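
/*
 * Usage sketch (illustration only, assuming the usual sysctl wiring in
 * kernel/sysctl.c): the handler above backs the kernel.timer_migration
 * knob, so migration of unpinned timers to busy CPUs can be toggled at
 * run time with:
 *
 *	sysctl kernel.timer_migration=0
 */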

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq-off times, etc.) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
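
/*
 * Worked example (illustration only, not part of the original file):
 * with HZ=1000, cpu=2 and j=5030, the skew gives j=5036; rem=36 is
 * below HZ/4, so we round down to 5000 and subtract the skew again,
 * returning 4994. With j=5400, rem=406 rounds up to 6000, returning
 * 5994. Each CPU's target thus sits 3*cpunr jiffies off the whole
 * second, spreading the wakeups out.
 */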

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
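
/*
 * Usage sketch (illustration only; "my_timer" is a hypothetical,
 * already initialized timer_list): a periodic housekeeping timer that
 * does not care about sub-second precision would be rearmed with
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + HZ));
 *
 * so that such timers across the system batch up on the same tick.
 */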

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);


static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned expires, unsigned lvl)
{
	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}
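
/*
 * Worked example (illustration only, not part of the original file):
 * assume base->clk = 1000 and timer->expires = 1100, so delta = 100.
 * Since LVL_START(1) = 63 <= 100 < LVL_START(2) = 504 the timer goes
 * into level 1:
 *
 *	calc_index(1100, 1) = LVL_OFFS(1) + (((1100 + 8) >> 3) & 63)
 *			    = 64 + (138 & 63) = 74
 *
 * The index 74 is stored back into timer->flags via timer_set_idx().
 * Rounding up by LVL_GRAN(1) = 8 means the timer can fire up to eight
 * jiffies late, which is exactly the granularity of that level.
 */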

static void
__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long delta = expires - base->clk;
	struct hlist_head *vec;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7);
	} else if ((long) delta < 0) {
		idx = base->clk & LVL_MASK;
	} else {
		/*
		 * Force obscenely large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (expires >= WHEEL_TIMEOUT_CUTOFF)
			expires = WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1);
	}
	/*
	 * Enqueue the timer into the array bucket, mark it pending in
	 * the bitmap and store the index in the timer flags.
	 */
	vec = base->vectors + idx;
	hlist_add_head(&timer->entry, vec);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);
}

static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);

	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.  We are
	 * protected against the other CPU fiddling with the timer by
	 * holding the timer base lock. This also makes sure that a
	 * CPU on the way to stop its tick can not evaluate the timer
	 * wheel.
	 *
	 * Spare the IPI for deferrable timers on idle targets though.
	 * The next busy ticks will take care of it. Except full dynticks
	 * require special care against races with idle_cpu(); let's deal
	 * with that later.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) {
		if (!(timer->flags & TIMER_DEFERRABLE) ||
		    tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
	}
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	void *site;

	/*
	 * start_site can be concurrently reset by
	 * timer_stats_timer_clear_start_info()
	 */
	site = READ_ONCE(timer->start_site);
	if (likely(!site))
		return;

	timer_stats_update_stats(timer, timer->start_pid, site,
				 timer->function, timer->start_comm,
				 timer->flags);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		setup_timer(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		setup_timer(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->flags = flags | raw_smp_processor_id();
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
		__clear_bit(idx, base->pending_map);

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and nohz is active then we need to use
	 * the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
	    (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and nohz is active then we need to use
	 * the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
	    (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

static inline struct timer_base *get_target_base(struct timer_base *base,
						 unsigned tflags)
{
#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
		return get_timer_this_cpu_base(tflags);
	return get_timer_cpu_base(tflags, get_nohz_timer_target());
#else
	return get_timer_this_cpu_base(tflags);
#endif
}

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base itself
 * is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 * to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf = timer->flags;

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
{
	struct timer_base *base, *new_base;
	unsigned long flags;
	int ret = 0;

	/*
	 * TODO: Calculate the array bucket of the timer right here w/o
	 * holding the base lock. This would allow us to check not only
	 * timer->expires == expires below, but also whether the timer
	 * ends up in the same bucket. If we really need to requeue
	 * the timer then we check whether base->clk has
	 * advanced between here and locking the timer base. If
	 * jiffies advanced we have to recalc the array bucket with the
	 * lock held.
	 */

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer)) {
		if (timer->expires == expires)
			return 1;
	}

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change the timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that the
		 * timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, false);
}
EXPORT_SYMBOL(mod_timer);
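
/*
 * Usage sketch (illustration only; "my_dev"/"my_timeout_fn" are
 * hypothetical): the typical life cycle of a timeout timer embedded in
 * a driver structure looks like
 *
 *	setup_timer(&dev->timeout, my_timeout_fn, (unsigned long)dev);
 *	mod_timer(&dev->timeout, jiffies + msecs_to_jiffies(100));
 *	...
 *	del_timer_sync(&dev->timeout);		(on teardown)
 *
 * my_timeout_fn() then runs in softirq context once the wheel bucket
 * holding the timer expires, unless it was modified or deleted first.
 */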

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked.  See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}

	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 *   interrupt context while calling this function. Even if the lock has
 *   nothing to do with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                   <SOFTIRQ>
 *                                   call_timer_fn();
 *                                     base->running_timer = mytimer;
 *  spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *  del_timer_sync(mytimer);
 *   while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}

static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
	while (!hlist_empty(head)) {
		struct timer_list *timer;
		void (*fn)(unsigned long);
		unsigned long data;

		timer = hlist_entry(head->first, struct timer_list, entry);
		timer_stats_account_timer(timer);

		base->running_timer = timer;
		detach_timer(timer, true);

		fn = timer->function;
		data = timer->data;

		if (timer->flags & TIMER_IRQSAFE) {
			spin_unlock(&base->lock);
			call_timer_fn(timer, fn, data);
			spin_lock(&base->lock);
		} else {
			spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, data);
			spin_lock_irq(&base->lock);
		}
	}
}

static int __collect_expired_timers(struct timer_base *base,
				    struct hlist_head *heads)
{
	unsigned long clk = base->clk;
	struct hlist_head *vec;
	int i, levels = 0;
	unsigned int idx;

	for (i = 0; i < LVL_DEPTH; i++) {
		idx = (clk & LVL_MASK) + i * LVL_SIZE;

		if (__test_and_clear_bit(idx, base->pending_map)) {
			vec = base->vectors + idx;
			hlist_move_list(vec, heads++);
			levels++;
		}
		/* Is it time to look at the next level? */
		if (clk & LVL_CLK_MASK)
			break;
		/* Shift clock for the next level granularity */
		clk >>= LVL_CLK_SHIFT;
	}
	return levels;
}
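
/*
 * Worked example (illustration only, not part of the original file):
 * at clk = 200 (binary 11001000) the low LVL_CLK_MASK bits are zero,
 * so after scanning level 0 at index 200 & 63 = 8 the loop also scans
 * level 1 at index (200 >> 3) & 63 = 25. Since 25 & LVL_CLK_MASK != 0
 * it stops there: level n is only scanned every LVL_CLK_DIV^n ticks.
 */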

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find the next pending bucket of a level. Search from level start (@offset)
 * + @clk upwards and if nothing there, search from start of the level
 * (@offset) up to @offset + clk.
 */
static int next_pending_bucket(struct timer_base *base, unsigned offset,
			       unsigned clk)
{
	unsigned pos, start = offset + clk;
	unsigned end = offset + LVL_SIZE;

	pos = find_next_bit(base->pending_map, end, start);
	if (pos < end)
		return pos - start;

	pos = find_next_bit(base->pending_map, start, offset);
	return pos < start ? pos + LVL_SIZE - start : -1;
}

/*
 * Search the first expiring timer in the various clock levels. Caller must
 * hold base->lock.
 */
static unsigned long __next_timer_interrupt(struct timer_base *base)
{
	unsigned long clk, next, adj;
	unsigned lvl, offset = 0;

	next = base->clk + NEXT_TIMER_MAX_DELTA;
	clk = base->clk;
	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
		int pos = next_pending_bucket(base, offset, clk & LVL_MASK);

		if (pos >= 0) {
			unsigned long tmp = clk + (unsigned long) pos;

			tmp <<= LVL_SHIFT(lvl);
			if (time_before(tmp, next))
				next = tmp;
		}
		/*
		 * Clock for the next level. If the current level clock lower
		 * bits are zero, we look at the next level as is. If not we
		 * need to advance it by one because that's going to be the
		 * next expiring bucket in that level. base->clk is the next
		 * expiring jiffie. So in case of:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    0
		 *
		 * we have to look at all levels @index 0. With
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    2
		 *
		 * LVL0 has the next expiring bucket @index 2. The upper
		 * levels have the next expiring bucket @index 1.
		 *
		 * In case that the propagation wraps the next level the same
		 * rules apply:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    F    2
		 *
		 * So after looking at LVL0 we get:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1
		 *  0    0    0    1    0
		 *
		 * So no propagation from LVL1 to LVL2 because that happened
		 * with the add already, but then we need to propagate further
		 * from LVL2 to LVL3.
		 *
		 * So the simple check whether the lower bits of the current
		 * level are 0 or not is sufficient for all cases.
		 */
		adj = clk & LVL_CLK_MASK ? 1 : 0;
		clk >>= LVL_CLK_SHIFT;
		clk += adj;
	}
	return next;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffie. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej:	base time jiffies
 * @basem:	base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	nextevt = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(nextevt, basej))
		expires = basem;
	else
		expires = basem + (nextevt - basej) * TICK_NSEC;

	return cmp_next_hrtimer_event(basem, expires);
}

static int collect_expired_timers(struct timer_base *base,
				  struct hlist_head *heads)
{
	/*
	 * NOHZ optimization. After a long idle sleep we need to forward the
	 * base to current jiffies. Avoid a loop by searching the bitfield for
	 * the next expiring timer.
	 */
	if ((long)(jiffies - base->clk) > 2) {
		unsigned long next = __next_timer_interrupt(base);

		/*
		 * If the next timer is ahead of time forward to current
		 * jiffies, otherwise forward to the next expiry time.
		 */
		if (time_after(next, jiffies)) {
			/* The call site will increment clock! */
			base->clk = jiffies - 1;
			return 0;
		}
		base->clk = next;
	}
	return __collect_expired_timers(base, heads);
}
#else
static inline int collect_expired_timers(struct timer_base *base,
					 struct hlist_head *heads)
{
	return __collect_expired_timers(base, heads);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 */
static inline void __run_timers(struct timer_base *base)
{
	struct hlist_head heads[LVL_DEPTH];
	int levels;

	if (!time_after_eq(jiffies, base->clk))
		return;

	spin_lock_irq(&base->lock);

	while (time_after_eq(jiffies, base->clk)) {

		levels = collect_expired_timers(base, heads);
		base->clk++;

		while (levels--)
			expire_timers(base, heads + levels);
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows:
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
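
/*
 * Usage sketch (illustration only): callers must set the task state
 * first, e.g. to wait up to one second while remaining signal-aware:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * A return of 0 means the full second elapsed; a positive value is the
 * number of jiffies that were left when the task was woken early.
 */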

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not contribute
 * to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;

	while (!hlist_empty(head)) {
		timer = hlist_entry(head->first, struct timer_list, entry);
		detach_timer(timer, false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

static void migrate_timers(int cpu)
{
	struct timer_base *old_base;
	struct timer_base *new_base;
	int b, i;

	BUG_ON(cpu_online(cpu));

	for (b = 0; b < NR_BASES; b++) {
		old_base = per_cpu_ptr(&timer_bases[b], cpu);
		new_base = get_cpu_ptr(&timer_bases[b]);
		/*
		 * The caller is globally serialized and nobody else
		 * takes two locks at once, deadlock is not possible.
		 */
		spin_lock_irq(&new_base->lock);
		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

		BUG_ON(old_base->running_timer);

		for (i = 0; i < WHEEL_SIZE; i++)
			migrate_timer_list(new_base, old_base->vectors + i);

		spin_unlock(&old_base->lock);
		spin_unlock_irq(&new_base->lock);
		put_cpu_ptr(&timer_bases);
	}
}

static int timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers((long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static inline void timer_register_cpu_notifier(void)
{
	cpu_notifier(timer_cpu_notify, 0);
}
#else
static inline void timer_register_cpu_notifier(void) { }
#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
	struct timer_base *base;
	int i;

	for (i = 0; i < NR_BASES; i++) {
		base = per_cpu_ptr(&timer_bases[i], cpu);
		base->cpu = cpu;
		spin_lock_init(&base->lock);
		base->clk = jiffies;
	}
}

static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}

void __init init_timers(void)
{
	init_timer_cpus();
	init_timer_stats();
	timer_register_cpu_notifier();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

static void __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	u64 delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (u64)(max - min) * NSEC_PER_USEC;
	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
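
/*
 * Usage sketch (illustration only): a driver waiting ~100us for
 * hardware should prefer
 *
 *	usleep_range(100, 200);
 *
 * over udelay(100): the hrtimer-backed sleep does not burn CPU and the
 * [min, max] window lets the scheduler coalesce nearby wakeups.
 */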