time.c 13.1 KB
Newer Older
J
Jeremy Fitzhardinge 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
14
#include <linux/kernel_stat.h>
15
#include <linux/math64.h>
16
#include <linux/gfp.h>
J
Jeremy Fitzhardinge 已提交
17

18
#include <asm/pvclock.h>
J
Jeremy Fitzhardinge 已提交
19 20 21 22
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
23
#include <xen/features.h>
J
Jeremy Fitzhardinge 已提交
24 25 26 27 28 29 30
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
/* length of one kernel tick in ns; used to convert runstate ns to ticks */
#define NS_PER_TICK	(1000000000LL / HZ)

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen and blocked time */
/* sub-tick remainders carried between do_stolen_accounting() calls */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
static DEFINE_PER_CPU(u64, xen_residual_blocked);
42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79

/* return an consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

80
	BUG_ON(preemptible());
81

82
	state = &__get_cpu_var(xen_runstate);
83 84 85 86 87 88 89 90 91 92 93 94 95 96

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

97 98 99
/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
100
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
101 102
}

103
void xen_setup_runstate_info(int cpu)
104 105 106
{
	struct vcpu_register_runstate_memory_area area;

107
	area.addr.v = &per_cpu(xen_runstate, cpu);
108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

125
	snap = &__get_cpu_var(xen_runstate_snapshot);
126 127 128 129 130 131 132 133 134

	/* work out how much time the VCPU has not been runn*ing*  */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
135
	   including any left-overs from last time. */
C
Christoph Lameter 已提交
136
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
137 138 139 140

	if (stolen < 0)
		stolen = 0;

141
	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
C
Christoph Lameter 已提交
142
	__this_cpu_write(xen_residual_stolen, stolen);
143
	account_steal_ticks(ticks);
144 145

	/* Add the appropriate number of ticks of blocked time,
146
	   including any left-overs from last time. */
C
Christoph Lameter 已提交
147
	blocked += __this_cpu_read(xen_residual_blocked);
148 149 150 151

	if (blocked < 0)
		blocked = 0;

152
	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
C
Christoph Lameter 已提交
153
	__this_cpu_write(xen_residual_blocked, blocked);
154
	account_idle_ticks(ticks);
155 156
}

157
/* Get the TSC speed from Xen */
158
static unsigned long xen_tsc_khz(void)
J
Jeremy Fitzhardinge 已提交
159
{
160
	struct pvclock_vcpu_time_info *info =
J
Jeremy Fitzhardinge 已提交
161 162
		&HYPERVISOR_shared_info->vcpu_info[0].time;

163
	return pvclock_tsc_khz(info);
J
Jeremy Fitzhardinge 已提交
164 165
}

166
cycle_t xen_clocksource_read(void)
J
Jeremy Fitzhardinge 已提交
167
{
168
        struct pvclock_vcpu_time_info *src;
J
Jeremy Fitzhardinge 已提交
169 170
	cycle_t ret;

171 172
	preempt_disable_notrace();
	src = &__get_cpu_var(xen_vcpu)->time;
173
	ret = pvclock_clocksource_read(src);
174
	preempt_enable_notrace();
J
Jeremy Fitzhardinge 已提交
175 176 177
	return ret;
}

178 179 180 181 182
/* clocksource ->read callback; the clocksource argument is unused */
static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

J
Jeremy Fitzhardinge 已提交
183 184
static void xen_read_wallclock(struct timespec *ts)
{
185 186 187
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
        struct pvclock_vcpu_time_info *vcpu_time;
J
Jeremy Fitzhardinge 已提交
188

189 190 191
	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
J
Jeremy Fitzhardinge 已提交
192 193
}

194
static unsigned long xen_get_wallclock(void)
J
Jeremy Fitzhardinge 已提交
195 196 197 198 199 200 201
{
	struct timespec ts;

	xen_read_wallclock(&ts);
	return ts.tv_sec;
}

202
static int xen_set_wallclock(unsigned long now)
J
Jeremy Fitzhardinge 已提交
203
{
204 205 206
	struct xen_platform_op op;
	int rc;

J
Jeremy Fitzhardinge 已提交
207
	/* do nothing for domU */
208 209 210 211 212 213 214 215 216 217 218 219
	if (!xen_initial_domain())
		return -1;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now;
	op.u.settime.nsecs = 0;
	op.u.settime.system_time = xen_clocksource_read();

	rc = HYPERVISOR_dom0_op(&op);
	WARN(rc != 0, "XENPF_settime failed: now=%ld\n", now);

	return rc;
J
Jeremy Fitzhardinge 已提交
220 221 222 223 224
}

/* Clocksource backed by the hypervisor's ns-resolution pvclock. */
static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,		/* prefer this over the raw TSC */
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask = ~0,
	.read = xen_clocksource_get_cycles,
};

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  This version of the hypervisor provides a
   single-shot timer with nanosecond resolution.  However, sharing the
   same event channel is a 100Hz tick which is delivered while the
   vcpu is running.  We don't care about or use this tick, but it will
   cause the core time code to think the timer fired too soon, and
   will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (ie, at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/


/*
  Convert a relative timeout into a hypervisor absolute time.  In
  theory we could track an offset between kernel time and hypervisor
  time and apply it here, but the two can drift even when the kernel
  uses the Xen clocksource, because ntp may warp the kernel clock.
  So we just add the delta to the hypervisor's idea of "now".
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
T
Thomas Gleixner 已提交
276
	case CLOCK_EVT_MODE_RESUME:
J
Jeremy Fitzhardinge 已提交
277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
		break;
	}
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

/* Clockevent built on the legacy set_timer_op hypercall. */
static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.rating = 500,
	.features = CLOCK_EVT_FEAT_ONESHOT,

	/* timeouts are already in ns, so mult/shift are the identity */
	.mult = 1,
	.shift = 0,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};



static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;
T
Thomas Gleixner 已提交
339 340
	case CLOCK_EVT_MODE_RESUME:
		break;
J
Jeremy Fitzhardinge 已提交
341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379
	}
}

/* Arm the vcpu_op single-shot timer 'delta' ns from now. */
static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.flags = VCPU_SSHOTTMR_future;
	single.timeout_abs_ns = get_abs_timeout(delta);

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	/* -ETIME means the deadline was already past; anything else is fatal */
	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}

/* Clockevent built on the newer VCPUOP timer hypercalls. */
static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.rating = 500,
	.features = CLOCK_EVT_FEAT_ONESHOT,

	/* timeouts are already in ns, so mult/shift are the identity */
	.mult = 1,
	.shift = 0,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

/* Active clockevent implementation: timer_op is the fallback; switched
   to the vcpu_op variant in xen_time_init() when the hypervisor
   supports it. */
static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

/* Per-cpu clockevent plus a slot for its irq name string. */
struct xen_clock_event_device {
	struct clock_event_device evt;
	/* NOTE(review): never assigned in this file as-is; presumably
	   meant to retain the kasprintf'd irq name — verify */
	char *name;
};
/* .evt.irq = -1 marks "no irq bound yet" */
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
J
Jeremy Fitzhardinge 已提交
386 387 388

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
389
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
J
Jeremy Fitzhardinge 已提交
390 391 392 393 394 395 396 397
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

398 399
	do_stolen_accounting();

J
Jeremy Fitzhardinge 已提交
400 401 402
	return ret;
}

J
Jeremy Fitzhardinge 已提交
403
void xen_setup_timer(int cpu)
J
Jeremy Fitzhardinge 已提交
404 405 406 407 408
{
	const char *name;
	struct clock_event_device *evt;
	int irq;

409
	evt = &per_cpu(xen_clock_events, cpu).evt;
410 411
	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);

J
Jeremy Fitzhardinge 已提交
412 413 414 415 416 417 418
	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
419 420 421
				      IRQF_DISABLED|IRQF_PERCPU|
				      IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME,
J
Jeremy Fitzhardinge 已提交
422 423 424 425
				      name, NULL);

	memcpy(evt, xen_clockevent, sizeof(*evt));

426
	evt->cpumask = cpumask_of(cpu);
J
Jeremy Fitzhardinge 已提交
427
	evt->irq = irq;
J
Jeremy Fitzhardinge 已提交
428 429
}

A
Alex Nixon 已提交
430 431 432 433
void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
434
	evt = &per_cpu(xen_clock_events, cpu).evt;
A
Alex Nixon 已提交
435
	unbind_from_irqhandler(evt->irq, NULL);
436
	evt->irq = -1;
A
Alex Nixon 已提交
437 438
}

J
Jeremy Fitzhardinge 已提交
439 440 441
void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());
442

443
	clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
J
Jeremy Fitzhardinge 已提交
444 445
}

446 447 448 449
void xen_timer_resume(void)
{
	int cpu;

450 451
	pvclock_resume();

452 453 454 455 456 457 458 459 460
	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}

461
/* Paravirt time ops: sched_clock reads the per-cpu pvclock directly. */
static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
};

465
/* Boot-time timer init for the boot cpu (x86_init.timers.timer_init). */
static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	/* pvclock reports ns, so the clocksource runs at 1GHz */
	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	/* the hypervisor guarantees a usable TSC for pvclock */
	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();
}
489

490
/* Install all Xen time hooks for a PV guest. */
void __init xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	/* per-cpu clockevents are handled by xen_setup_timer() instead */
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}

503
#ifdef CONFIG_XEN_PVHVM
504 505 506 507
/* Per-cpu clockevent setup for PVHVM guests. */
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();

	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
	 * doing it xen_hvm_cpu_notify (which gets called by smp_init during
	 * early bootup and also during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}

516
/* Install the Xen time hooks for a PVHVM guest, when supported. */
void __init xen_hvm_init_time_ops(void)
{
	/* vector callback is needed otherwise we cannot receive interrupts
	 * on cpu > 0 and at this point we don't know how many cpus are
	 * available */
	if (!xen_have_vector_callback)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		/* fix: the concatenated literals previously printed
		 * "...on HVM,disable pv timer" with no space */
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
				"disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
537
#endif