/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int __init parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

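/*
 * Tasks waiting for an async page fault are parked on a per-token wait
 * entry, kept in a small hash table bucketed by hash_32(token).
 */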
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 * 		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
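	/*
	 * Decide whether we can sleep: never for the idle task; with
	 * CONFIG_PREEMPT_COUNT the preempt count and RCU read-side depth
	 * can be checked directly, otherwise any fault that interrupted
	 * the kernel is conservatively treated as non-sleepable and we
	 * poll in halt instead of calling schedule().
	 */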
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
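	/*
	 * A waiter that went into halt only needs a reschedule IPI to get
	 * out of it; a sleeping waiter is woken through its swait queue.
	 */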
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
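		/*
		 * Register this CPU's apf_reason area with the host: the MSR
		 * takes the physical address of the structure together with
		 * the enable/configuration bits set below.
		 */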
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
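	/*
	 * The host bumps version before and after it updates steal_time,
	 * seqcount style: retry if version is odd or changed across the
	 * read, since that means we raced with an update.
	 */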
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We only need to IPI vCPUs that are currently running; for a
	 * preempted vCPU, set KVM_VCPU_FLUSH_TLB so the host flushes its
	 * TLB on the next guest entry instead.
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

static void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

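/*
 * Probe the KVM CPUID signature once and cache the base leaf; zero means
 * the signature was not found, i.e. we are not running as a KVM guest.
 */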
static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

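/*
 * Allocate the per-CPU cpumask that kvm_flush_tlb_others() uses as
 * scratch space when the paravirt TLB flush is enabled.
 */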
static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it is our turn and we are kicked. If interrupts were
	 * enabled, use safe_halt() so that an interrupt can still wake us;
	 * otherwise, if the lock data gets overwritten in the irq spinlock
	 * slowpath, the kick could be lost and no spurious interrupt would
	 * arrive to save us from hanging.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

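	/*
	 * Hook the qspinlock slow path: kvm_wait() halts a waiting vCPU and
	 * kvm_kick_cpu() wakes it back up with the KVM_HC_KICK_CPU hypercall.
	 */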
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */