/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

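/*
 * Fallback broadcast function: complain once instead of silently losing
 * ticks when no real broadcast callback is available.
 */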
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
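/*
 * Called from the architecture specific broadcast IPI handler on the
 * target cpu: deliver the tick via the cpu local event handler.
 */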
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

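/*
 * Suspend path: shut the broadcast device down. tick_resume_broadcast()
 * brings it back up on resume.
 */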
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

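/*
 * Resume path: restore the broadcast device and report whether the tick
 * of this cpu is handled by the broadcast device, in which case the
 * caller must not restart the cpu local device.
 */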
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

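/*
 * Program the broadcast device for the next expiry and, if the device
 * supports dynamic irq affinity, steer its interrupt to the target cpu.
 */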
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

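/*
 * Propagate @expires as the next expiry time to the per cpu tick
 * devices in @mask.
 */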
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		if (!tick_nohz_full_cpu(cpu))
			tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

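/*
 * Allocate the cpumasks used by the broadcast code. Called once during
 * early boot, hence the GFP_NOWAIT allocations.
 */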
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}