time.c 13.5 KB
Newer Older
J
Jeremy Fitzhardinge 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
14
#include <linux/kernel_stat.h>
15
#include <linux/math64.h>
16
#include <linux/gfp.h>
17
#include <linux/slab.h>
18
#include <linux/pvclock_gtod.h>
J
Jeremy Fitzhardinge 已提交
19

20
#include <asm/pvclock.h>
J
Jeremy Fitzhardinge 已提交
21 22 23 24
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
25
#include <xen/features.h>
J
Jeremy Fitzhardinge 已提交
26 27 28 29 30 31 32
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
33
#define NS_PER_TICK	(1000000000LL / HZ)
J
Jeremy Fitzhardinge 已提交
34

35
/* runstate info updated by Xen */
36
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
37 38

/* snapshots of runstate info */
39
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
40

41
/* unused ns of stolen time */
42
static DEFINE_PER_CPU(u64, xen_residual_stolen);
43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80

/* return an consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

81
	BUG_ON(preemptible());
82

83
	state = this_cpu_ptr(&xen_runstate);
84 85 86 87 88 89 90 91 92 93 94 95 96 97

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

98 99 100
/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
101
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
102 103
}

104
void xen_setup_runstate_info(int cpu)
105 106 107
{
	struct vcpu_register_runstate_memory_area area;

108
	area.addr.v = &per_cpu(xen_runstate, cpu);
109 110 111 112 113 114 115 116 117 118

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
119
	s64 runnable, offline, stolen;
120 121 122 123 124 125
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

126
	snap = this_cpu_ptr(&xen_runstate_snapshot);
127 128 129 130 131 132 133 134

	/* work out how much time the VCPU has not been runn*ing*  */
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
135
	   including any left-overs from last time. */
C
Christoph Lameter 已提交
136
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
137 138 139 140

	if (stolen < 0)
		stolen = 0;

141
	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
C
Christoph Lameter 已提交
142
	__this_cpu_write(xen_residual_stolen, stolen);
143
	account_steal_ticks(ticks);
144 145
}

146
/* Get the TSC speed from Xen */
147
static unsigned long xen_tsc_khz(void)
J
Jeremy Fitzhardinge 已提交
148
{
149
	struct pvclock_vcpu_time_info *info =
J
Jeremy Fitzhardinge 已提交
150 151
		&HYPERVISOR_shared_info->vcpu_info[0].time;

152
	return pvclock_tsc_khz(info);
J
Jeremy Fitzhardinge 已提交
153 154
}

155
cycle_t xen_clocksource_read(void)
J
Jeremy Fitzhardinge 已提交
156
{
157
        struct pvclock_vcpu_time_info *src;
J
Jeremy Fitzhardinge 已提交
158 159
	cycle_t ret;

160
	preempt_disable_notrace();
161
	src = &__this_cpu_read(xen_vcpu)->time;
162
	ret = pvclock_clocksource_read(src);
163
	preempt_enable_notrace();
J
Jeremy Fitzhardinge 已提交
164 165 166
	return ret;
}

167 168 169 170 171
/*
 * clocksource ->read callback; the clocksource argument is unused,
 * all the work happens in xen_clocksource_read().
 */
static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

J
Jeremy Fitzhardinge 已提交
172 173
static void xen_read_wallclock(struct timespec *ts)
{
174 175 176
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
        struct pvclock_vcpu_time_info *vcpu_time;
J
Jeremy Fitzhardinge 已提交
177

178 179 180
	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
J
Jeremy Fitzhardinge 已提交
181 182
}

183
/* x86_platform.get_wallclock hook: delegate to the Xen wallclock. */
static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

188
/*
 * x86_platform.set_wallclock hook for non-dom0 guests, which cannot
 * set the host RTC: always report failure.
 */
static int xen_set_wallclock(const struct timespec *now)
{
	return -1;
}

193 194
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
J
Jeremy Fitzhardinge 已提交
195
{
196 197
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;
198

199
	struct xen_platform_op op;
200
	struct timespec now;
201

202 203
	now = __current_kernel_time();

204 205 206 207 208 209
	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;
210 211

	op.cmd = XENPF_settime;
212 213
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
214 215
	op.u.settime.system_time = xen_clocksource_read();

216
	(void)HYPERVISOR_dom0_op(&op);
217

218 219 220 221 222 223 224 225
	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

226
	return NOTIFY_OK;
J
Jeremy Fitzhardinge 已提交
227 228
}

229 230 231 232
/* Notifier hooked into the timekeeping core; registered for dom0 only. */
static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

J
Jeremy Fitzhardinge 已提交
233 234 235
/* Clocksource backed by the Xen pvclock; rating 400 makes it preferred. */
static struct clocksource xen_clocksource __read_mostly = {
	.name	= "xen",
	.rating	= 400,
	.read	= xen_clocksource_get_cycles,
	.mask	= ~0,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  This version of the hypervisor provides a
   single-shot timer with nanosecond resolution.  However, sharing the
   same event channel is a 100Hz tick which is delivered while the
   vcpu is running.  We don't care about or use this tick, but it will
   cause the core time code to think the timer fired too soon, and
   will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (ie, at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/


/*
  Get a hypervisor absolute time.  In theory we could maintain an
  offset between the kernel's time and the hypervisor's time, and
  apply that to a kernel's absolute timeout.  Unfortunately the
  hypervisor and kernel times can drift even if the kernel is using
  the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	/* delta is relative (ns); result is absolute Xen system time (ns). */
	return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
T
Thomas Gleixner 已提交
287
	case CLOCK_EVT_MODE_RESUME:
J
Jeremy Fitzhardinge 已提交
288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
		break;
	}
}

/* Arm the legacy single-shot hypervisor timer for `delta` ns from now. */
static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/*
	 * We may have missed the deadline, but there's no real way of
	 * knowing for sure.  If the event was in the past, then we'll
	 * get an immediate interrupt.
	 */
	return 0;
}

/*
 * Clockevent device backed by the legacy HYPERVISOR_set_timer_op
 * single-shot timer (available on all Xen versions).
 */
static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	/* mult=1/shift=0: deltas are already expressed in nanoseconds */
	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};



static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;
T
Thomas Gleixner 已提交
350 351
	case CLOCK_EVT_MODE_RESUME:
		break;
J
Jeremy Fitzhardinge 已提交
352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390
	}
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}

/*
 * Clockevent device backed by the vcpu_op single-shot timer; used
 * when the hypervisor supports it (see xen_time_init).
 */
static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	/* mult=1/shift=0: deltas are already expressed in nanoseconds */
	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

/*
 * Active clockevent implementation; defaults to the timer_op variant
 * and is switched to the vcpu_op variant in xen_time_init() when the
 * hypervisor accepts VCPUOP_stop_periodic_timer.
 */
static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;
391 392 393

/* Per-cpu clockevent device plus a stable buffer for its irq name. */
struct xen_clock_event_device {
	struct clock_event_device evt;
	char name[16];	/* "timerN", passed to bind_virq_to_irqhandler */
};

/* irq = -1 marks "no timer irq bound yet" (see xen_setup_timer). */
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
J
Jeremy Fitzhardinge 已提交
397 398 399

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
400
	struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
J
Jeremy Fitzhardinge 已提交
401 402 403 404 405 406 407 408
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

409 410
	do_stolen_accounting();

J
Jeremy Fitzhardinge 已提交
411 412 413
	return ret;
}

414 415 416 417 418 419 420 421 422 423 424 425
/*
 * Unbind a CPU's timer irq (irq stays -1 until re-setup); must not
 * be called for the boot CPU.
 */
void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
	}
}

J
Jeremy Fitzhardinge 已提交
426
void xen_setup_timer(int cpu)
J
Jeremy Fitzhardinge 已提交
427
{
428 429
	struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
	struct clock_event_device *evt = &xevt->evt;
J
Jeremy Fitzhardinge 已提交
430 431
	int irq;

432
	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
433 434
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);
435

J
Jeremy Fitzhardinge 已提交
436 437
	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

438
	snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
J
Jeremy Fitzhardinge 已提交
439 440

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
441
				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
D
David Vrabel 已提交
442
				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
443
				      xevt->name, NULL);
444
	(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
J
Jeremy Fitzhardinge 已提交
445 446 447

	memcpy(evt, xen_clockevent, sizeof(*evt));

448
	evt->cpumask = cpumask_of(cpu);
J
Jeremy Fitzhardinge 已提交
449
	evt->irq = irq;
J
Jeremy Fitzhardinge 已提交
450 451
}

A
Alex Nixon 已提交
452

J
Jeremy Fitzhardinge 已提交
453 454
void xen_setup_cpu_clockevents(void)
{
455
	clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
J
Jeremy Fitzhardinge 已提交
456 457
}

458 459 460 461
void xen_timer_resume(void)
{
	int cpu;

462 463
	pvclock_resume();

464 465 466 467 468 469 470 471 472
	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}

473
/* sched_clock reads the Xen pvclock directly (nanosecond timebase). */
static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock	= xen_clocksource_read,
};

477
/* One-time timer init for the boot CPU: clocksource, clockevents, wallclock. */
static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	/*
	 * If we can turn off the 100Hz tick, the hypervisor supports
	 * the vcpuop-based timer interface; prefer it.
	 */
	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	/* Only dom0 may push the kernel's time back to the hypervisor. */
	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
504

505
/* Install the Xen time hooks for PV guests. */
void __init xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;

	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}

520
#ifdef CONFIG_XEN_PVHVM
521 522 523 524
/* Per-cpu clockevent setup for PVHVM guests. */
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();

	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
	 * doing it xen_hvm_cpu_notify (which gets called by smp_init during
	 * early bootup and also during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}

533
/* Install the Xen time hooks for PVHVM guests, when safe to do so. */
void __init xen_hvm_init_time_ops(void)
{
	/* vector callback is needed otherwise we cannot receive interrupts
	 * on cpu > 0 and at this point we don't know how many cpus are
	 * available */
	if (!xen_have_vector_callback)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		/* Fix: the two concatenated literals previously ran together
		 * as "...on HVM,disable pv timer" (missing separator). */
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
				"disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
554
#endif