/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
static int tick_broadcast_forced;

static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

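/*
 * Update the frequency of the broadcast device, if @dev is the current
 * broadcast device. Returns -ENODEV otherwise. The update is serialized
 * against the broadcast handlers via tick_broadcast_lock.
 */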
int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}


static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
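/*
 * Called on the target CPU when a broadcast IPI is received: invoke the
 * event handler of the CPU local tick device.
 */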
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus which are set in the mask (the mask is
 * mangled in the process).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *	     expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
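		/* Fall through - TICK_BROADCAST_FORCE implies TICK_BROADCAST_ON */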
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hiccup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int cpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

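/*
 * Program the broadcast device for the next expiry and, if the device
 * supports CLOCK_EVT_FEAT_DYNIRQ, steer its interrupt to the target cpu.
 */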
static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
					      CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}

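/*
 * Check whether the broadcast device needs to be kept alive by @cpu.
 * Only relevant for hrtimer based broadcast devices: returns -EBUSY when
 * @cpu owns the broadcast hrtimer and an event is pending, i.e. this cpu
 * must not enter a deep idle state.
 */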
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event < bc->next_event)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

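/*
 * Called when a cpu enters (TICK_BROADCAST_ENTER) or leaves
 * (TICK_BROADCAST_EXIT) deep idle. Returns -EBUSY if the cpu must not go
 * deep idle because it is required to provide broadcast events, 0
 * otherwise. Interrupts are disabled by the caller, so the plain
 * raw_spin_lock() is sufficient.
 */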
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * the CPU to the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode, we
		 * return.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* If it is a hrtimer based broadcast, return busy */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away; we return
			 * busy, so the CPU does not try to go deep
			 * idle.
			 */
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event < bc->next_event) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * In case of hrtimer broadcasts the
				 * programming might have moved the
				 * timer to this cpu. If yes, remove
				 * us from the broadcast mask and
				 * return busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event <= now) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

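/*
 * Set the next_event of all per cpu tick devices in @mask to @expires,
 * so the periodic to oneshot transition starts from a consistent expiry.
 */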
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	if (!bc)
		return;

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
#endif

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}