/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
/* Per-cpu tick device: the clock event device driving this cpu's tick */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);

/*
 * Tick next event: keeps track of the tick time
 */
/* Expiry time of the next periodic tick (advanced in tick_periodic()) */
ktime_t tick_next_period;
/* Length of one tick: NSEC_PER_SEC / HZ (set in tick_setup_device()) */
ktime_t tick_period;

/*
 * CPU which owns the do_timer() duty (jiffies/timekeeping update).
 * -1 until the first tick device is set up, see tick_setup_device().
 */
int tick_do_timer_cpu __read_mostly = -1;

/* Serializes setup/teardown of the per-cpu tick devices */
DEFINE_SPINLOCK(tick_device_lock);

37 38 39 40 41 42 43 44
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

45 46 47 48 49 50 51 52 53 54
/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;

	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
}

/*
 * Periodic tick
 *
 * If @cpu owns the do_timer() duty, advance the global timekeeping
 * (jiffies, next tick time) under xtime_lock; then do the per-cpu
 * tick work on every cpu.
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		/* Advance jiffies / global time by one tick */
		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	/* Per-cpu work: runs on all cpus, not only the do_timer one */
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 *
 * Runs the periodic tick work and, for devices which emulate the
 * periodic tick in oneshot mode, programs the next period.
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	/* Devices in true periodic mode reprogram themselves */
	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		/*
		 * Programming failed (nonzero return — presumably the
		 * expiry was already in the past; confirm against
		 * clockevents_program_event). Run the tick work for the
		 * missed period and try the following one.
		 */
		tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}

/*
 * Setup the device for a periodic tick
 *
 * @broadcast is passed through to tick_set_periodic_handler() to
 * select the handler. Devices with native periodic mode are simply
 * switched to it; all others emulate the tick in oneshot mode,
 * starting at tick_next_period.
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		/* Read tick_next_period consistently vs. tick_periodic() */
		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		/* Retry until an event in the future is programmed */
		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

/*
 * Setup the tick device
 *
 * Install @newdev as the tick device of @cpu. On a replacement the
 * old device's handler and next expiry are carried over; on first
 * setup the device starts in periodic mode and may take over the
 * global do_timer() duty.
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      cpumask_t cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == -1) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		/*
		 * Replacement: carry handler and expiry over from the
		 * old device. next_event is only consumed in the
		 * oneshot branch below, which cannot be reached on
		 * first setup (mode is forced to periodic above), so
		 * it is always initialized when used.
		 */
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpus_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

/*
 * Check, if the new registered device should be used.
 *
 * Called via the clockevents notifier (CLOCK_EVT_NOTIFY_ADD) on the
 * cpu that registered @newdev. Returns NOTIFY_STOP when the device
 * was taken as tick or broadcast device, NOTIFY_OK otherwise.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;
	cpumask_t cpumask;

	spin_lock_irqsave(&tick_device_lock, flags);

	/* The device must be able to fire on this cpu at all */
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;
	cpumask = cpumask_of_cpu(cpu);

	/* cpu local device ? */
	if (!cpus_equal(newdev->cpumask, cpumask)) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpus_equal(curdev->cpumask, cpumask))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask);
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}
	/* Transfer the do_timer job away from this cpu */
	if (*cpup == tick_do_timer_cpu) {
		int cpu = first_cpu(cpu_online_map);

		/* -1 when no online cpu is left */
		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

/*
 * Shut down the tick device of the current cpu for a suspend
 * transition; tick_resume() restarts it.
 */
static void tick_suspend(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

/*
 * Resume the tick device of the current cpu.
 *
 * When tick_resume_broadcast() returns 0 — presumably: the broadcast
 * device did not take over the tick, confirm against tick-broadcast.c —
 * the local device is reprogrammed in the mode it was in before
 * suspend.
 */
static void tick_resume(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
	int broadcast = tick_resume_broadcast();

	spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

/*
 * Notification about clock event devices
 *
 * Dispatches clockevents notifier reasons to the tick management
 * functions. The @dev payload is reason dependent: a
 * clock_event_device pointer for ADD, a cpu number pointer for the
 * cpu related events (see tick_shutdown()).
 */
static int tick_notify(struct notifier_block *nb, unsigned long reason,
			       void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_ADD:
		return tick_check_new_device(dev);

	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, dev);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/* Tear down broadcast state first, then the tick device */
		tick_shutdown_broadcast_oneshot(dev);
		tick_shutdown_broadcast(dev);
		tick_shutdown(dev);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/* Notifier block hooked into the clockevents framework by tick_init() */
static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};

/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework. Runs once at
 * boot (__init); afterwards tick_notify() receives all clockevents
 * notifications.
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}