/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tick_broadcast_on;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

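/*
 * Debugging: see timer_list.c
 */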
struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

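/*
 * Fallback broadcast function: complain once if we ever get here without
 * a real broadcast function being set up.
 */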
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

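/*
 * Set up the broadcast function of a tick device: fall back to
 * tick_broadcast() if the device provides none, and to the err_broadcast()
 * stub if even that is unavailable.
 */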
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device.
			 */
			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			/* Nothing to do */
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
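/*
 * Called on the target cpu when it receives the timer broadcast IPI:
 * invoke the cpu local event handler on behalf of the broadcast device.
 */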
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (tick_broadcast_force)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

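/*
 * Suspend path: shut the broadcast device down, if there is one.
 */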
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

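/*
 * Resume path: bring the broadcast device back into operation.
 */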
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

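/*
 * Program the broadcast device for the given expiry time and, if the
 * device supports it, steer the broadcast interrupt to the target cpu.
 */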
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

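/*
 * Resume path: put the broadcast device back into oneshot mode.
 */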
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_set_mode(td->evtdev,
					     CLOCK_EVT_MODE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

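/*
 * Set the cpu local next_event of all tick devices in @mask to @expires.
 */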
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

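/*
 * Allocate the cpumasks used by the broadcast code. Called once during
 * early boot, hence the GFP_NOWAIT allocations.
 */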
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}