/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tick_broadcast_on;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

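/*
 * Update the frequency of the broadcast clock event device, if @dev
 * is the current broadcast device. Returns -ENODEV otherwise.
 */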
int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}


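/*
 * Fallback broadcast function: complain once that the broadcast tick
 * could not be delivered to the CPUs in @mask.
 */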
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

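/*
 * Set up the broadcast callback of @dev: default to tick_broadcast()
 * and fall back to err_broadcast() if no broadcast function exists.
 */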
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device.
			 */
			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			/* Nothing to do */
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
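/*
 * Called on the target CPU when a broadcast tick is delivered
 * (typically via IPI from architecture code). Runs the handler of the
 * CPU local clock event device, if one is installed.
 */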
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	raw_spin_lock(&tick_broadcast_lock);

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
		goto unlock;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			goto unlock;
		tick_do_periodic_broadcast();
	}
unlock:
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (tick_broadcast_force)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

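/*
 * Shut down the broadcast clock event device when the system goes
 * into suspend.
 */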
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

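/*
 * Resume the broadcast clock event device and restart it in the
 * current broadcast mode, if any CPU depends on it.
 */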
void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

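/*
 * Program the broadcast device for the next oneshot expiry and, on
 * success, steer its interrupt towards the target cpu where the
 * device supports dynamic irq affinity.
 */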
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->state != CLOCK_EVT_STATE_ONESHOT)
		clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_set_state(td->evtdev,
					      CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

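/*
 * Returns -EBUSY if the hrtimer based broadcast device is armed and
 * bound to @cpu, i.e. that cpu currently carries the broadcast duty.
 */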
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event.tv64 == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event.tv64 < bc->next_event.tv64)
			return;
	}
	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

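/*
 * If the hrtimer based broadcast device was bound to the dead cpu,
 * reprogram it so the broadcast duty is pulled over to this cpu.
 */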
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 */
int tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu, ret = 0;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return 0;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			broadcast_shutdown_local(bc, dev);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we remove the
		 * CPU from the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

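/*
 * Set the next_event of the cpu local devices in @mask to @expires.
 */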
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

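/*
 * Allocate the cpumasks used by the broadcast code at early boot.
 */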
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}