/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

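/*
 * Async page fault support: a task hit by a "page not present" event is
 * parked in this hash, keyed by the token that the host later presents
 * with the matching "page ready" notification.
 */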
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 * 		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
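	/*
	 * Contexts that cannot schedule (the idle task, or a kernel context
	 * holding preempt/RCU state) poll in halt instead of sleeping on the
	 * wait queue.
	 */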
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

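/*
 * Wake one waiter: a halted waiter is spinning in halt and needs a
 * reschedule IPI; a sleeping task is woken through its swait queue.
 */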
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

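		/*
		 * MSR_KVM_ASYNC_PF_EN takes the guest physical address of the
		 * per-cpu apf_reason area, with the enable/config bits OR'ed
		 * into the low bits.
		 */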
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
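	/*
	 * The host sets the low bit of ->version while it updates the record;
	 * retry until we read a consistent, even-versioned snapshot.
	 */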
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
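	/*
	 * KVM_HINTS_REALTIME means vCPUs are never preempted, so the
	 * paravirt test-and-set spinlock fallback is unnecessary and native
	 * queued spinlocks can be used.
	 */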
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to flush only the online vCPUs, and queue a
	 * flush_on_enter for preempted vCPUs.
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

static void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

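	/*
	 * The PV TLB flush path reads the preempted flag from steal_time,
	 * hence the additional KVM_FEATURE_STEAL_TIME requirement.
	 */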
	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

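/* Probe once and cache the KVM CPUID leaf base; 0 means not running on KVM. */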
static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.init_platform	= kvmclock_init,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

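	/*
	 * Allocate the per-cpu scratch cpumask used by kvm_flush_tlb_others()
	 * only if the PV TLB flush path is actually going to be installed.
	 */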
	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we are kicked. Note that we do a safe
	 * halt in the irqs-enabled case, to avoid hanging if the lock info is
	 * overwritten in the irq spinlock slowpath and no spurious interrupt
	 * occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
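/*
 * vcpu_is_preempted() backend: the host publishes preemption state in the
 * 'preempted' byte of the vCPU's steal_time record.
 */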
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */