/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
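/*
 * Set via CLOCK_EVT_NOTIFY_BROADCAST_FORCE: while this flag is set,
 * CLOCK_EVT_NOTIFY_BROADCAST_OFF requests are ignored in
 * tick_do_broadcast_on_off().
 */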
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
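 * Returns 1 when the device has been installed as the new broadcast
 * device, 0 otherwise.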
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
	return 1;
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
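	/*
	 * tick_broadcast can be a NULL define when
	 * CONFIG_GENERIC_CLOCKEVENTS_BROADCAST is not enabled (see
	 * tick-internal.h), so check again and fall back to a loud
	 * error handler.
	 */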
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
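
/*
 * Called on resume. The return value tells the caller whether the
 * current cpu is handled by the broadcast device; if so, the caller
 * can skip restarting the cpu-local tick device.
 */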

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT
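
/*
 * tick_broadcast_oneshot_mask: cpus which have shut down their local
 *	device and rely on the broadcast device for wakeups.
 * tick_broadcast_pending_mask: cpus whose expired event is handled by
 *	the broadcast IPI, so they skip reprogramming the local device.
 * tick_broadcast_force_mask: cpus which must get the broadcast IPI on
 *	the next broadcast even though they already left the oneshot mask.
 */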

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
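 *
 * Devices with CLOCK_EVT_FEAT_DYNIRQ can have the broadcast interrupt
 * steered to the cpu with the earliest expiry, which avoids waking an
 * unrelated cpu just to relay the broadcast via IPI.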
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		if (!tick_nohz_full_cpu(cpu))
			tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

void __init tick_broadcast_init(void)
{
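	/*
	 * Runs early in boot where allocations must not sleep, hence
	 * GFP_NOWAIT.
	 */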
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}