/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */
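
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a per-CPU clock event device whose timer stops in deep
 * C-states would typically advertise that fact when registering, e.g.
 *
 *	evt->features |= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
 *	clockevents_register_device(evt);
 *
 * The C3STOP flag is what the code below keys on when deciding whether
 * a CPU has to be handed over to the broadcast device around deep idle.
 */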

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tick_broadcast_on;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check whether the device can be utilized as a broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}
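
/*
 * Editorial note: a device that is itself per-CPU, a dummy, or stops in
 * deep C-states cannot serve as the broadcast device, since the whole
 * point of the broadcast device is to keep running when the per-CPU
 * timers do not.
 */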

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check whether the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}


static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

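/*
 * Editorial note: the second NULL check below is not redundant; on
 * configurations without a generic broadcast function, tick_broadcast
 * may be a NULL define (assumption: see tick-internal.h), in which case
 * the device falls back to err_broadcast() above.
 */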
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check whether the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device.
			 */
			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			/* Nothing to do */
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif
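
/*
 * Editorial note: tick_receive_broadcast() above is the receiving side
 * of the wakeup; architectures are expected to call it from their
 * broadcast/timer IPI handler so the woken CPU runs its own (stopped)
 * clock event handler.
 */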

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	raw_spin_lock(&tick_broadcast_lock);

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		goto unlock;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			goto unlock;
		tick_do_periodic_broadcast();
	}
unlock:
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (tick_broadcast_force)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}
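
/*
 * Illustrative sketch (editorial addition): the idle entry path can use
 * the check above to avoid committing to a deep C-state when the wakeup
 * IPI is already on its way, roughly:
 *
 *	if (tick_check_broadcast_expired())
 *		return;		// stay shallow, broadcast IPI is imminent
 */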

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}
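
/*
 * Editorial note: CLOCK_EVT_FEAT_DYNIRQ (checked above) marks broadcast
 * devices whose interrupt affinity may be retargeted at runtime. Steering
 * the IRQ to the CPU with the earliest event wakes that CPU directly,
 * instead of waking some fixed CPU which then has to send an IPI.
 */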

static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_set_mode(td->evtdev,
					     CLOCK_EVT_MODE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event.tv64 == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event.tv64 < bc->next_event.tv64)
			return;
	}
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
}

static void broadcast_move_bc(int deadcpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || !broadcast_needs_cpu(bc, deadcpu))
		return;
	/* This moves the broadcast assignment to this cpu */
	clockevents_program_event(bc, bc->next_event, 1);
}
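
/*
 * Editorial note: CLOCK_EVT_FEAT_HRTIMER covers the case where no real
 * broadcast hardware exists and the broadcast device is emulated by a
 * hrtimer bound to one CPU (assumption: see tick-broadcast-hrtimer.c).
 * That CPU cannot go deep idle itself, which is what the helpers above
 * enforce and what broadcast_move_bc() hands over on CPU removal.
 */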

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 */
int tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu, ret = 0;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return 0;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			broadcast_shutdown_local(bc, dev);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we remove the
		 * CPU from the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	broadcast_move_bc(cpu);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}