/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
/*
 * Tick devices
 */
30
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
31 32 33
/*
 * Tick next event: keeps track of the tick time
 */
34 35
ktime_t tick_next_period;
ktime_t tick_period;
36
int tick_do_timer_cpu __read_mostly = -1;
37
DEFINE_SPINLOCK(tick_device_lock);
38

39 40 41 42 43 44 45 46
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

47 48 49 50 51 52 53 54 55 56
/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;

	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
}

/*
 * Periodic tick: per-tick bookkeeping, run on every tick on every cpu.
 *
 * Only the cpu owning the do_timer() duty (tick_do_timer_cpu) advances
 * the tracked next-tick time and calls do_timer(); it does so under
 * xtime_lock so readers see a consistent tick_next_period.  Every cpu
 * then runs its local process-time accounting and profiling hooks.
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	/* Per-cpu work, independent of the do_timer duty */
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
82
	ktime_t next;
83 84 85 86 87 88 89 90 91

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
92
	next = ktime_add(dev->next_event, tick_period);
93 94 95 96
	for (;;) {
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_periodic(cpu);
97
		next = ktime_add(next, tick_period);
98 99 100 101 102 103
	}
}

/*
 * Setup the device for a periodic tick
 */
104
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
105
{
106 107 108 109 110
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;
111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137

	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
138
			      const cpumask_t *cpumask)
139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == -1) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
172 173
	if (!cpus_equal(newdev->cpumask, *cpumask))
		irq_set_affinity(newdev->irq, *cpumask);
174

175 176 177 178 179 180 181 182 183
	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

184 185
	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
186 187
	else
		tick_setup_oneshot(newdev, handler, next_event);
188 189 190 191 192 193 194 195 196 197 198
}

/*
 * Check, if the new registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;
199
	cpumask_of_cpu_ptr_declare(cpumask);
200 201 202 203

	spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
204
	cpumask_of_cpu_ptr_next(cpumask, cpu);
205
	if (!cpu_isset(cpu, newdev->cpumask))
206
		goto out_bc;
207 208 209 210 211

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
212
	if (!cpus_equal(newdev->cpumask, *cpumask)) {
213 214 215 216 217 218 219 220 221 222 223 224

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
225
		if (curdev && cpus_equal(curdev->cpumask, *cpumask))
226 227 228 229 230 231 232 233
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
234 235 236 237 238 239
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
240 241 242 243
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
244
			goto out_bc;
245 246 247 248
	}

	/*
	 * Replace the eventually existing device by the new
249 250
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
251
	 */
252 253 254 255
	if (tick_is_broadcast_device(curdev)) {
		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
		curdev = NULL;
	}
256 257
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask);
258 259
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
260

261 262 263 264 265 266 267 268 269
	spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;
270

271
	spin_unlock_irqrestore(&tick_device_lock, flags);
272

273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299
	return ret;
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a life CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}
300 301 302 303 304 305
	/* Transfer the do_timer job away from this cpu */
	if (*cpup == tick_do_timer_cpu) {
		int cpu = first_cpu(cpu_online_map);

		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
	}
306 307 308
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

309
static void tick_suspend(void)
310 311 312 313 314
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
315
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
316 317 318
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

319
static void tick_resume(void)
320 321 322
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
T
Thomas Gleixner 已提交
323
	int broadcast = tick_resume_broadcast();
324 325

	spin_lock_irqsave(&tick_device_lock, flags);
T
Thomas Gleixner 已提交
326 327 328 329 330 331 332 333
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
334 335 336
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

337 338 339 340 341 342 343 344 345 346 347
/*
 * Notification about clock event devices
 */
static int tick_notify(struct notifier_block *nb, unsigned long reason,
			       void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_ADD:
		return tick_check_new_device(dev);

348 349
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
350
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
351 352 353
		tick_broadcast_on_off(reason, dev);
		break;

354 355 356 357 358
	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

359
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
360
		tick_shutdown_broadcast_oneshot(dev);
361
		tick_shutdown_broadcast(dev);
362 363 364
		tick_shutdown(dev);
		break;

365
	case CLOCK_EVT_NOTIFY_SUSPEND:
366
		tick_suspend();
367 368 369 370
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
T
Thomas Gleixner 已提交
371
		tick_resume();
372 373
		break;

374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393
	default:
		break;
	}

	return NOTIFY_OK;
}

/* Notifier registered with the clockevents framework (see tick_init) */
static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};

/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework
 */
void __init tick_init(void)
{
	/* From now on tick_notify() receives clockevents notifications */
	clockevents_register_notifier(&tick_notifier);
}