/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * Wait until the host reports that the page which triggered this async #PF
 * is present again, i.e. until kvm_async_pf_task_wake() is called with the
 * same token.  Depending on the context the task either sleeps or the vCPU
 * halts.
 *
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 * 		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

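/*
 * Wake a single waiter for @n->token; the caller must hold the bucket lock.
 * A waiter that parked itself via halt (n->halted) is kicked with a
 * reschedule IPI instead of through the swait queue.
 */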
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

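/*
 * Handle a 'page ready' notification from the host: wake the task waiting
 * on @token.  If no waiter is registered yet (the wake-up raced ahead of
 * the #PF), insert a dummy entry so that the subsequent
 * kvm_async_pf_task_wait() returns immediately.  A token of ~0 wakes all
 * waiters bound to this CPU.
 */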
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

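/*
 * Fetch the reason the host stored in the per-cpu apf_reason area and
 * clear it.  Zero means async PF is disabled or nothing is pending, i.e.
 * the fault is an ordinary page fault.
 */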
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

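/*
 * #PF entry point used once async page faults are enabled.  Dispatch on
 * the reason provided by the host: PAGE_NOT_PRESENT parks the task until
 * the page is brought in, PAGE_READY wakes the parked task, anything else
 * is a regular page fault.
 */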
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

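/*
 * Install KVM-specific paravirt hooks: make the legacy I/O delay a no-op
 * when the host advertises KVM_FEATURE_NOP_IO_DELAY, and skip the IO-APIC
 * timer check, which only wastes time in a guest.
 */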
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

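/*
 * Tell the host where this CPU's steal time area lives by writing its
 * physical address (plus the enable bit) to MSR_KVM_STEAL_TIME.
 */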
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

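/*
 * Per-CPU setup: enable async page faults, paravirt EOI and steal time
 * accounting, depending on the features the host advertises.
 */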
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		/* Async page fault support for L1 hypervisor is optional */
		if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
			(pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
			wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

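/* Turn off async page fault delivery for this CPU. */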
static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

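/*
 * On restart, tear down the paravirt MSRs on every CPU so that the newly
 * booted kernel does not inherit pointers into this kernel's memory.
 */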
static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

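/*
 * Read the steal time the host publishes for @cpu.  The version field acts
 * like a seqcount: an odd value means an update is in progress, so retry
 * until a stable, even version is observed.
 */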
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

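/*
 * Undo the per-CPU paravirt setup before the CPU goes down and wake any
 * async #PF waiters still bound to it.
 */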
static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

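/*
 * Guest-side initialization: enable the paravirt features the host
 * advertises (async page faults, steal time, paravirt EOI, the kvmclock
 * vsyscall) and hook CPU hotplug so every CPU gets set up.
 */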
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

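/*
 * Locate KVM's CPUID leaf range by scanning the hypervisor CPUID space for
 * the "KVMKVMKVM" signature.  A return value of 0 means we are not running
 * on KVM.
 */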
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.x2apic_available	= kvm_para_available,
};

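/*
 * Flip the paravirt steal time static keys once we know the feature is
 * available; "no-steal-acc" keeps the run queue clock adjustment disabled.
 */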
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
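/* The host sets steal_time.preempted when it schedules this vCPU out. */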
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */