/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tick_broadcast_on;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_forced;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check whether the device can be utilized as a broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check whether the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}


static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check whether the device is dysfunctional, i.e. a placeholder which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif
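
/*
 * Illustrative sketch (hypothetical handler, not part of this file): an
 * architecture which delivers the broadcast as an IPI would call the
 * helper above from its IPI handler on each target CPU, e.g.:
 *
 *	static void handle_timer_broadcast_ipi(void)
 *	{
 *		if (tick_receive_broadcast())
 *			pr_warn_once("tick broadcast: no local device\n");
 *	}
 */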

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check whether the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *	     expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;

	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the power state?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	if (!tick_device_is_functional(dev))
		return;

	raw_spin_lock(&tick_broadcast_lock);
	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
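		/* Fall through - TICK_BROADCAST_FORCE implies TICK_BROADCAST_ON */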
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shut down the cpu local device if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hiccup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);
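
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * cpuidle driver whose deep idle states stop the local timer would
 * bracket the state entry roughly like this:
 *
 *	tick_broadcast_control(TICK_BROADCAST_ON);
 *	... enter the timer-stopping power state ...
 *	tick_broadcast_control(TICK_BROADCAST_OFF);
 *
 * TICK_BROADCAST_FORCE is sticky: once set, subsequent OFF requests
 * are ignored.
 */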

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}
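
/*
 * Illustrative use (hypothetical caller, not part of this file): the
 * idle entry path can poll this before committing to a deep state and
 * fall back to a cheap polling idle when the wakeup IPI is imminent:
 *
 *	if (tick_check_broadcast_expired())
 *		... pick a shallow/polling idle state instead ...
 */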

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
					      CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}

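/*
 * With a hrtimer based broadcast device the CPU to which the hrtimer is
 * bound acts as the broadcast timer itself, so it must not go deep idle
 * while an event is armed: return -EBUSY in that case.
 */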
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event.tv64 == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event.tv64 < bc->next_event.tv64)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return 0;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			broadcast_shutdown_local(bc, dev);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event);
		}
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we remove the
		 * CPU from the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
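
/*
 * Illustrative call sequence (hypothetical caller, not part of this
 * file): the idle path brackets a timer-stopping idle state with the
 * <linux/tick.h> helpers, which end up in the oneshot control path
 * implemented above with TICK_BROADCAST_ENTER / TICK_BROADCAST_EXIT:
 *
 *	if (tick_broadcast_enter())
 *		... -EBUSY: this CPU must not enter deep idle ...
 *	... idle ...
 *	tick_broadcast_exit();
 */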

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}