/*  KVM paravirtual clock driver. A clocksource implementation
    Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
*/

#include <linux/clocksource.h>
#include <linux/kvm_para.h>
#include <asm/pvclock.h>
#include <asm/arch_hooks.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
#include <asm/reboot.h>

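/*
 * KVM_SCALE is used as both the clocksource mult (1 << KVM_SCALE) and its
 * shift, so the generic (cycles * mult) >> shift conversion is an identity:
 * kvm_clock_read() already returns nanoseconds.
 */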
#define KVM_SCALE 22

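/* kvm-clock is used by default; "no-kvmclock" on the command line disables it. */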
static int kvmclock = 1;

static int parse_no_kvmclock(char *arg)
{
	kvmclock = 0;
	return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);

/* The hypervisor will put information about time periodically here */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct pvclock_vcpu_time_info, hv_clock);
static struct pvclock_wall_clock wall_clock;

/*
 * The wallclock is the time of day when we booted. Some time may have
 * elapsed since the hypervisor wrote the data, so we account for that
 * using the per-cpu system time.
 */
static unsigned long kvm_get_wallclock(void)
{
	struct pvclock_vcpu_time_info *vcpu_time;
	struct timespec ts;
	int low, high;

	low = (int)__pa(&wall_clock);
	high = ((u64)__pa(&wall_clock) >> 32);
	/* Ask the host to fill wall_clock at the address we just handed it. */
	native_write_msr(MSR_KVM_WALL_CLOCK, low, high);

	vcpu_time = &get_cpu_var(hv_clock);
	pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
	put_cpu_var(hv_clock);

	return ts.tv_sec;
}

static int kvm_set_wallclock(unsigned long now)
{
	/* The host owns the wall clock; the guest cannot set it. */
	return -1;
}

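/*
 * Read the per-cpu pvclock area. The host keeps it updated with a reference
 * TSC value and scaling parameters; pvclock_clocksource_read() turns the
 * current TSC delta into nanoseconds of system time.
 */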
static cycle_t kvm_clock_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	src = &get_cpu_var(hv_clock);
	ret = pvclock_clocksource_read(src);
	put_cpu_var(hv_clock);
	return ret;
}

/*
 * Preset lpj instead of letting the guest calibrate its delay loop. If the
 * guest calibrated under heavy load it would end up with a lower lpj, and
 * delays executed later without load would then finish too early, even though
 * no delay loop may ever finish before the requested time. Any heuristic here
 * is bound to fail, because ultimately a large pool of guests can be running
 * and disturbing each other. So we preset lpj here.
 */
static unsigned long kvm_get_tsc_khz(void)
{
	return preset_lpj;
}

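/*
 * Preset lpj from the TSC frequency the host reports for cpu 0:
 * khz * 1000 cycles per second, divided by HZ, gives cycles per jiffy.
 */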
static void kvm_get_preset_lpj(void)
{
	struct pvclock_vcpu_time_info *src;
	unsigned long khz;
	u64 lpj;

	src = &per_cpu(hv_clock, 0);
	khz = pvclock_tsc_khz(src);

	lpj = ((u64)khz * 1000);
	do_div(lpj, HZ);
	preset_lpj = lpj;
}

static struct clocksource kvm_clock = {
	.name = "kvm-clock",
	.read = kvm_clock_read,
	.rating = 400,
	.mask = CLOCKSOURCE_MASK(64),
	.mult = 1 << KVM_SCALE,
	.shift = KVM_SCALE,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

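/*
 * Tell the host where this cpu's pvclock area lives. Bit 0 of the guest
 * physical address written to MSR_KVM_SYSTEM_TIME acts as the enable bit.
 */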
static int kvm_register_clock(char *txt)
{
	int cpu = smp_processor_id();
	int low, high;
	low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
	high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
	printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
	       cpu, high, low, txt);
	return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
}

#ifdef CONFIG_X86_LOCAL_APIC
static void kvm_setup_secondary_clock(void)
{
	/*
	 * Now that the first cpu already had this clocksource initialized,
	 * we shouldn't fail.
	 */
	WARN_ON(kvm_register_clock("secondary cpu clock"));
	/* ok, done with our trickery, call native */
	setup_secondary_APIC_clock();
}
#endif

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	WARN_ON(kvm_register_clock("primary cpu clock"));
	native_smp_prepare_boot_cpu();
}
#endif

/*
 * After the clock is registered, the host will keep writing to the registered
 * memory location. If the guest happens to shut down, this memory won't be
 * valid any more. In cases like kexec, in which you install a new kernel,
 * that means a random memory location would keep being written to. So before
 * any kind of shutdown from our side, we unregister the clock by writing a
 * value that does not have the 'enable' bit set in the MSR.
 */
#ifdef CONFIG_KEXEC
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
	native_machine_crash_shutdown(regs);
}
#endif

static void kvm_shutdown(void)
{
	native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
	native_machine_shutdown();
}

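/*
 * If we are running on KVM and the host advertises KVM_FEATURE_CLOCKSOURCE,
 * register the boot cpu's clock and point the paravirt time, APIC, SMP and
 * machine ops at the kvm-clock implementations, then register the
 * clocksource itself.
 */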
void __init kvmclock_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
		if (kvm_register_clock("boot clock"))
			return;
		pv_time_ops.get_wallclock = kvm_get_wallclock;
		pv_time_ops.set_wallclock = kvm_set_wallclock;
		pv_time_ops.sched_clock = kvm_clock_read;
		pv_time_ops.get_tsc_khz = kvm_get_tsc_khz;
#ifdef CONFIG_X86_LOCAL_APIC
		pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
#endif
#ifdef CONFIG_SMP
		smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
#endif
		machine_ops.shutdown  = kvm_shutdown;
#ifdef CONFIG_KEXEC
		machine_ops.crash_shutdown  = kvm_crash_shutdown;
#endif
		kvm_get_preset_lpj();
		clocksource_register(&kvm_clock);
	}
}