/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

	return td;
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
51
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
52

53 54 55 56 57
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
58 59
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
66
		write_seqlock(&jiffies_lock);
67 68 69 70 71

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
72
		write_sequnlock(&jiffies_lock);
73 74 75 76 77 78 79 80 81 82 83 84
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
85
	ktime_t next;
86 87 88 89 90 91 92 93 94

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
95
	next = ktime_add(dev->next_event, tick_period);
96
	for (;;) {
97
		if (!clockevents_program_event(dev, next, false))
98
			return;
99 100 101 102 103 104 105 106 107 108 109
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * when then will increment time, posibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
110
		next = ktime_add(next, tick_period);
111 112 113 114 115 116
	}
}

/*
 * Setup the device for a periodic tick
 */
117
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
118
{
119 120 121 122 123
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;
124

125 126
	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
127 128 129 130 131 132
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
133
			seq = read_seqbegin(&jiffies_lock);
134
			next = tick_next_period;
135
		} while (read_seqretry(&jiffies_lock, seq));
136 137 138 139

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
140
			if (!clockevents_program_event(dev, next, false))
141 142 143 144 145 146 147 148 149 150 151
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
152
			      const struct cpumask *cpumask)
153 154 155 156 157 158 159 160 161 162 163 164
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
165
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
166
			if (!tick_nohz_full_cpu(cpu))
167 168 169
				tick_do_timer_cpu = cpu;
			else
				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
170 171 172 173 174 175 176 177 178 179 180
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
181
		td->evtdev->event_handler = clockevents_handle_noop;
182 183 184 185 186 187 188 189
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
190
	if (!cpumask_equal(newdev->cpumask, cpumask))
191
		irq_set_affinity(newdev->irq, cpumask);
192

193 194 195 196 197 198 199 200 201
	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

202 203
	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
204 205
	else
		tick_setup_oneshot(newdev, handler, next_event);
206 207
}

/*
 * tick_install_replacement - exchange the cpu local tick device with
 * @newdev and set the new device up.
 */
void tick_install_replacement(struct clock_event_device *newdev)
{
	/*
	 * __get_cpu_var() is deprecated; this_cpu_ptr() is the
	 * equivalent accessor and matches the this_cpu API already
	 * used in this file (see __this_cpu_read() above).
	 */
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}

/*
 * Decide whether @newdev is usable as the cpu local device for @cpu.
 */
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	/* The device must be able to fire on this cpu at all */
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;

	/* A strictly cpu local device is always acceptable */
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;

	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;

	/* Prefer an existing cpu local device */
	return !(curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)));
}

static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/* Use the higher rated one */
	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	/*
	 * Reject newdev when it is NOT a usable cpu local device.
	 * tick_check_percpu() returns true when the device IS suitable
	 * (tick_check_new_device() likewise branches to out_bc on
	 * !tick_check_percpu()).  The previous unnegated test rejected
	 * every suitable device and let unsuitable ones through.
	 */
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}

/*
T
Thomas Gleixner 已提交
264 265
 * Check, if the new registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
266
 */
267
void tick_check_new_device(struct clock_event_device *newdev)
268 269 270
{
	struct clock_event_device *curdev;
	struct tick_device *td;
271
	int cpu;
272 273

	cpu = smp_processor_id();
274
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
275
		goto out_bc;
276 277 278 279 280

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
281 282
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;
283

284 285 286
	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;
287

288 289 290
	if (!try_module_get(newdev->owner))
		return;

291 292
	/*
	 * Replace the eventually existing device by the new
293 294
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
295
	 */
296
	if (tick_is_broadcast_device(curdev)) {
297
		clockevents_shutdown(curdev);
298 299
		curdev = NULL;
	}
300
	clockevents_exchange_device(curdev, newdev);
301
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
302 303
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
304
	return;
305 306 307 308 309

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
310
	tick_install_broadcast_device(newdev);
311 312
}

/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
318
void tick_handover_do_timer(int *cpup)
319 320 321 322 323 324 325 326 327
{
	if (*cpup == tick_do_timer_cpu) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a life CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
335
void tick_shutdown(unsigned int *cpup)
336 337 338 339 340 341 342 343 344 345 346 347
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
348
		dev->event_handler = clockevents_handle_noop;
349 350 351 352
		td->evtdev = NULL;
	}
}

void tick_suspend(void)
354 355 356
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);

357
	clockevents_shutdown(td->evtdev);
358 359
}

void tick_resume(void)
361 362
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
T
Thomas Gleixner 已提交
363
	int broadcast = tick_resume_broadcast();
364

T
Thomas Gleixner 已提交
365 366 367 368 369 370 371 372
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
373 374
}

/**
 * tick_init - initialize the tick control
 *
 * Delegates to the broadcast machinery initialization.
 */
void __init tick_init(void)
{
	tick_broadcast_init();
}