/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
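/* Protects the broadcast device and the cpumasks above */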
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
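	/*
	 * A device which stops in deep power states (C3STOP) cannot serve
	 * as the broadcast device, and a lower rated device never replaces
	 * the current one.
	 */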
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
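/*
 * Called by the architecture from the broadcast IPI handler: run the
 * cpu local event handler as if the local clock event device had fired.
 */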
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

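	/*
	 * If the last cpu left the broadcast mask, shut the broadcast
	 * device down. If the first cpu entered it, start the device in
	 * the current tick mode.
	 */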
	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

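/*
 * Program the broadcast device for @expires and, if the device supports
 * dynamic irq affinity (CLOCK_EVT_FEAT_DYNIRQ), steer its interrupt to
 * the target cpu.
 */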
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
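	/*
	 * If programming the broadcast device for the next pending expiry
	 * fails because that event is already in the past, restart from
	 * here and expire the overdue cpu local events as well.
	 */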
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

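/*
 * Allocate the cpumasks used for broadcast handling. This is called
 * early during boot, hence GFP_NOWAIT.
 */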
void __init tick_broadcast_init(void)
{
	alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}