// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>

#include "trace.h"

static struct kvm_pgtable *hyp_pgtable;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;

/*
 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
 * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
 * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock too
 * long will also starve other vCPUs. We also have to make sure that the page
 * tables are not freed while we release the lock.
 */
static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
			      phys_addr_t end,
			      int (*fn)(struct kvm_pgtable *, u64, u64),
			      bool resched)
{
	int ret;
	u64 next;

	do {
		struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
		if (!pgt)
			return -EINVAL;

		next = stage2_pgd_addr_end(kvm, addr, end);
		ret = fn(pgt, addr, next - addr);
		if (ret)
			break;

		if (resched && next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (addr = next, addr != end);

	return ret;
}

#define stage2_apply_range_resched(kvm, addr, end, fn)			\
	stage2_apply_range(kvm, addr, end, fn, true)

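/*
 * Return true if dirty logging is enabled for this memslot: it has a dirty
 * bitmap allocated and is not read-only (a read-only slot can never be
 * written, so there is nothing to log).
 */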
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;
	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}

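/*
 * A pfn that is not backed by map-able RAM (i.e. not covered by the kernel's
 * linear mapping, such as MMIO) is treated as device memory at stage-2.
 */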
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_is_map_memory(pfn);
}

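/*
 * Host memory management helpers. These back the kvm_pgtable_mm_ops
 * structures (kvm_s2_mm_ops, kvm_user_mm_ops and kvm_hyp_mm_ops below) that
 * the generic page-table code uses to allocate, refcount and translate
 * table pages.
 */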
static void *stage2_memcache_zalloc_page(void *arg)
{
	struct kvm_mmu_memory_cache *mc = arg;

	/* Allocated with __GFP_ZERO, so no need to zero */
	return kvm_mmu_memory_cache_alloc(mc);
}

static void *kvm_host_zalloc_pages_exact(size_t size)
{
	return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

static void kvm_host_get_page(void *addr)
{
	get_page(virt_to_page(addr));
}

static void kvm_host_put_page(void *addr)
{
	put_page(virt_to_page(addr));
}

static int kvm_host_page_count(void *addr)
{
	return page_count(virt_to_page(addr));
}

static phys_addr_t kvm_host_pa(void *addr)
{
	return __pa(addr);
}

static void *kvm_host_va(phys_addr_t phys)
{
	return __va(phys);
}

static void clean_dcache_guest_page(void *va, size_t size)
{
	__clean_dcache_guest_page(va, size);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
	__invalidate_icache_guest_page(va, size);
}

/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we flush to make sure the IO subsystem will
 * never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:   The KVM stage-2 MMU pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 * @may_block: Whether or not we are permitted to block
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
				 bool may_block)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	phys_addr_t end = start + size;

	assert_spin_locked(&kvm->mmu_lock);
	WARN_ON(size & ~PAGE_MASK);
	WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
				   may_block));
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
	__unmap_stage2_range(mmu, start, size, true);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;

	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 */
void free_hyp_pgds(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);
	if (hyp_pgtable) {
		kvm_pgtable_hyp_destroy(hyp_pgtable);
		kfree(hyp_pgtable);
		hyp_pgtable = NULL;
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static bool kvm_host_owns_hyp_mappings(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		return false;

	/*
	 * This can happen at boot time when __create_hyp_mappings() is called
	 * after the hyp protection has been enabled, but the static key has
	 * not been flipped yet.
	 */
	if (!hyp_pgtable && is_protected_kvm_enabled())
		return false;

	WARN_ON(!hyp_pgtable);

	return true;
}

static int __create_hyp_mappings(unsigned long start, unsigned long size,
				 unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	if (WARN_ON(!kvm_host_owns_hyp_mappings()))
		return -EINVAL;

	mutex_lock(&kvm_hyp_pgd_mutex);
	err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
	mutex_unlock(&kvm_hyp_pgd_mutex);

	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
{
	phys_addr_t addr;
	int ret;

	for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
		ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
					__phys_to_pfn(addr));
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = kern_hyp_va((unsigned long)from);
	unsigned long end = kern_hyp_va((unsigned long)to);

	if (is_kernel_in_hyp_mode())
		return 0;

	if (!kvm_host_owns_hyp_mappings()) {
		if (WARN_ON(prot != PAGE_HYP))
			return -EPERM;
		return pkvm_share_hyp(kvm_kaddr_to_phys(from),
				      kvm_kaddr_to_phys(to));
	}

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
					    prot);
		if (err)
			return err;
	}

	return 0;
}

static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
					unsigned long *haddr,
					enum kvm_pgtable_prot prot)
{
	unsigned long base;
	int ret = 0;

	if (!kvm_host_owns_hyp_mappings()) {
		base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
					 phys_addr, size, prot);
		if (IS_ERR_OR_NULL((void *)base))
			return PTR_ERR((void *)base);
		*haddr = base;

		return 0;
	}

	mutex_lock(&kvm_hyp_pgd_mutex);

	/*
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check below will
	 * kick. A potential alternative would be to detect that
	 * overflow and switch to an allocation above the idmap.
	 *
	 * The allocated size is always a multiple of PAGE_SIZE.
	 */
	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
	base = io_map_base - size;

	/*
	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
	 * allocating the new area, as it would indicate we've
	 * overflowed the idmap/IO address range.
	 */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		ret = -ENOMEM;
	else
		io_map_base = base;

	mutex_unlock(&kvm_hyp_pgd_mutex);

	if (ret)
		goto out;

	ret = __create_hyp_mappings(base, size, phys_addr, prot);
	if (ret)
		goto out;

	*haddr = base + offset_in_page(phys_addr);
out:
	return ret;
}

/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @kaddr:	Kernel VA for this mapping
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr)
{
	unsigned long addr;
	int ret;

	*kaddr = ioremap(phys_addr, size);
	if (!*kaddr)
		return -ENOMEM;

	if (is_kernel_in_hyp_mode()) {
		*haddr = *kaddr;
		return 0;
	}

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_DEVICE);
	if (ret) {
		iounmap(*kaddr);
		*kaddr = NULL;
		*haddr = NULL;
		return ret;
	}

	*haddr = (void __iomem *)addr;
	return 0;
}

/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr)
{
	unsigned long addr;
	int ret;

	BUG_ON(is_kernel_in_hyp_mode());

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_EXEC);
	if (ret) {
		*haddr = NULL;
		return ret;
	}

	*haddr = (void *)addr;
	return 0;
}

static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
	/* We shouldn't need any other callback to walk the PT */
	.phys_to_virt		= kvm_host_va,
};

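/*
 * Walk the userspace (stage-1) page tables with the generic page-table
 * walker to find the leaf entry mapping @addr, and return the size of the
 * block or page mapped at that level.
 */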
static int get_user_mapping_size(struct kvm *kvm, u64 addr)
{
	struct kvm_pgtable pgt = {
		.pgd		= (kvm_pte_t *)kvm->mm->pgd,
		.ia_bits	= VA_BITS,
		.start_level	= (KVM_PGTABLE_MAX_LEVELS -
				   CONFIG_PGTABLE_LEVELS),
		.mm_ops		= &kvm_user_mm_ops,
	};
	kvm_pte_t pte = 0;	/* Keep GCC quiet... */
	u32 level = ~0;
	int ret;

	ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
	VM_BUG_ON(ret);
	VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
	VM_BUG_ON(!(pte & PTE_VALID));

	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}

static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
	.zalloc_page		= stage2_memcache_zalloc_page,
	.zalloc_pages_exact	= kvm_host_zalloc_pages_exact,
	.free_pages_exact	= free_pages_exact,
	.get_page		= kvm_host_get_page,
	.put_page		= kvm_host_put_page,
	.page_count		= kvm_host_page_count,
	.phys_to_virt		= kvm_host_va,
	.virt_to_phys		= kvm_host_pa,
	.dcache_clean_inval_poc	= clean_dcache_guest_page,
	.icache_inval_pou	= invalidate_icache_guest_page,
};

/**
 * kvm_init_stage2_mmu - Initialise an S2 MMU structure
 * @kvm:	The pointer to the KVM structure
 * @mmu:	The pointer to the s2 MMU structure
 *
 * Allocates only the stage-2 HW PGD level table(s).
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
	int cpu, err;
	struct kvm_pgtable *pgt;

	if (mmu->pgt != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
	if (!pgt)
		return -ENOMEM;

	err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
	if (err)
		goto out_free_pgtable;

	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
	if (!mmu->last_vcpu_ran) {
		err = -ENOMEM;
		goto out_destroy_pgtable;
	}

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

	mmu->arch = &kvm->arch;
	mmu->pgt = pgt;
	mmu->pgd_phys = __pa(pgt->pgd);
	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
	return 0;

out_destroy_pgtable:
	kvm_pgtable_stage2_destroy(pgt);
out_free_pgtable:
	kfree(pgt);
	return err;
}

static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;
		hva_t vm_start, vm_end;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(current->mm);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	mmap_read_unlock(current->mm);
	srcu_read_unlock(&kvm->srcu, idx);
}

void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	struct kvm_pgtable *pgt = NULL;

	spin_lock(&kvm->mmu_lock);
	pgt = mmu->pgt;
	if (pgt) {
		mmu->pgd_phys = 0;
		mmu->pgt = NULL;
		free_percpu(mmu->last_vcpu_ran);
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgt) {
		kvm_pgtable_stage2_destroy(pgt);
		kfree(pgt);
	}
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 * @writable:   Whether or not to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr;
	int ret = 0;
	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
				     KVM_PGTABLE_PROT_R |
				     (writable ? KVM_PGTABLE_PROT_W : 0);

	size += offset_in_page(guest_ipa);
	guest_ipa &= PAGE_MASK;

	for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
		ret = kvm_mmu_topup_memory_cache(&cache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)
			break;

		spin_lock(&kvm->mmu_lock);
		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
					     &cache);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			break;

		pa += PAGE_SIZE;
	}

	kvm_mmu_free_memory_cache(&cache);
	return ret;
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @mmu:        The KVM stage-2 MMU pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages after memory region
 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
 * all present PUD, PMD and PTEs are write protected in the memory region.
 * Afterwards read of dirty page log can be called.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start, end;

	if (WARN_ON_ONCE(!memslot))
		return;

	start = memslot->base_gfn << PAGE_SHIFT;
	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(&kvm->arch.mmu, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks bits set in mask write protects the associated pte's. Caller must
 * acquire kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(&kvm->arch.mmu, start, end);
}

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva,
					       unsigned long map_size)
{
	gpa_t gpa_start;
	hva_t uaddr_start, uaddr_end;
	size_t size;

	/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
	if (map_size == PAGE_SIZE)
		return true;

	size = memslot->npages * PAGE_SIZE;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;

	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SHIFT:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those stage-2 blocks, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

/*
 * Check if the given hva is backed by a transparent huge page (THP) and
 * whether it can be mapped using block mapping in stage2. If so, adjust
 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
 * supported. This will need to be updated to support other THP sizes.
 *
 * Returns the size of the mapping.
 */
static unsigned long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long hva, kvm_pfn_t *pfnp,
			    phys_addr_t *ipap)
{
	kvm_pfn_t pfn = *pfnp;

	/*
	 * Make sure the adjustment is done only for THP pages. Also make
	 * sure that the HVA and IPA are sufficiently aligned and that the
	 * block map is contained within the memslot.
	 */
	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
	    get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		*ipap &= PMD_MASK;
		kvm_release_pfn_clean(pfn);
		pfn &= ~(PTRS_PER_PMD - 1);
		get_page(pfn_to_page(pfn));
		*pfnp = pfn;

		return PMD_SIZE;
	}

	/* Use page mapping if we cannot use block mapping. */
	return PAGE_SIZE;
}

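/*
 * Work out the largest page shift the host mapping can provide: the hugepage
 * shift for hugetlbfs VMAs, or, for PFNMAP VMAs, the largest block size whose
 * alignment is satisfied by both the VA and the PA and which fits within the
 * VMA. Anything else falls back to PAGE_SHIFT.
 */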
static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
{
	unsigned long pa;

	if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
		return huge_page_shift(hstate_vma(vma));

	if (!(vma->vm_flags & VM_PFNMAP))
		return PAGE_SHIFT;

	VM_BUG_ON(is_vm_hugetlb_page(vma));

	pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);

#ifndef __PAGETABLE_PMD_FOLDED
	if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PUD_SIZE) <= vma->vm_end)
		return PUD_SHIFT;
#endif

	if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PMD_SIZE) <= vma->vm_end)
		return PMD_SHIFT;

	return PAGE_SHIFT;
}

/*
 * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
 * able to see the page's tags and therefore they must be initialised first. If
 * PG_mte_tagged is set, tags have already been initialised.
 *
 * The race in the test/set of the PG_mte_tagged flag is handled by:
 * - preventing VM_SHARED mappings in a memslot with MTE, which stops two
 *   VMs racing to sanitise the same page
 * - mmap_lock protects between a VM faulting a page in and the VMM performing
 *   an mprotect() to add VM_MTE
 */
static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
			     unsigned long size)
{
	unsigned long i, nr_pages = size >> PAGE_SHIFT;
	struct page *page;

	if (!kvm_has_mte(kvm))
		return 0;

	/*
	 * pfn_to_online_page() is used to reject ZONE_DEVICE pages
	 * that may not support tags.
	 */
	page = pfn_to_online_page(pfn);

	if (!page)
		return -EFAULT;

	for (i = 0; i < nr_pages; i++, page++) {
		if (!test_bit(PG_mte_tagged, &page->flags)) {
			mte_clear_page_tags(page_address(page));
			set_bit(PG_mte_tagged, &page->flags);
		}
	}

	return 0;
}

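/*
 * Handle a stage-2 fault on memory backed by a memslot: resolve the host
 * mapping for the faulting hva, pick the largest mapping size that both the
 * host VMA and the memslot allow, and install (or relax the permissions of)
 * the corresponding stage-2 entry.
 */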
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret = 0;
	bool write_fault, writable, force_pte = false;
	bool exec_fault;
	bool device = false;
	bool shared;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	short vma_shift;
	gfn_t gfn;
	kvm_pfn_t pfn;
	bool logging_active = memslot_is_logging(memslot);
	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
	unsigned long vma_pagesize, fault_granule;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
	struct kvm_pgtable *pgt;

	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
	write_fault = kvm_is_write_fault(vcpu);
	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
	VM_BUG_ON(write_fault && exec_fault);

	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/*
	 * Let's check if we will get back a huge page backed by hugetlbfs, or
	 * get block mapping for device MMIO region.
	 */
	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, hva);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	/*
	 * logging_active is guaranteed to never be true for VM_PFNMAP
	 * memslots.
	 */
	if (logging_active) {
		force_pte = true;
		vma_shift = PAGE_SHIFT;
	} else {
		vma_shift = get_vma_page_shift(vma, hva);
	}

	shared = (vma->vm_flags & VM_SHARED);

	switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SHIFT:
		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
			break;
		fallthrough;
#endif
	case CONT_PMD_SHIFT:
		vma_shift = PMD_SHIFT;
		fallthrough;
	case PMD_SHIFT:
		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
			break;
		fallthrough;
	case CONT_PTE_SHIFT:
		vma_shift = PAGE_SHIFT;
		force_pte = true;
		fallthrough;
	case PAGE_SHIFT:
		break;
	default:
		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
	}

	vma_pagesize = 1UL << vma_shift;
	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
		fault_ipa &= ~(vma_pagesize - 1);

	gfn = fault_ipa >> PAGE_SHIFT;
	mmap_read_unlock(current->mm);

	/*
	 * Permission faults just need to update the existing leaf entry,
	 * and so normally don't require allocations from the memcache. The
	 * only exception to this is when dirty logging is enabled at runtime
	 * and a write fault needs to collapse a block entry into a table.
	 */
	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
		ret = kvm_mmu_topup_memory_cache(memcache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)
			return ret;
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensure that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_gfn will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 *
	 * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
	 * used to avoid unnecessary overhead introduced to locate the memory
	 * slot because it's always fixed even @gfn is adjusted for huge pages.
	 */
	smp_rmb();

	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
				   write_fault, &writable, NULL);
	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(hva, vma_shift);
		return 0;
	}
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn)) {
		/*
		 * If the page was identified as device early by looking at
		 * the VMA flags, vma_pagesize is already representing the
		 * largest quantity we can map.  If instead it was mapped
		 * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
		 * and must not be upgraded.
		 *
		 * In both cases, we don't let transparent_hugepage_adjust()
		 * change things at the last minute.
		 */
		device = true;
	} else if (logging_active && !write_fault) {
		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
		writable = false;
	}

	if (exec_fault && device)
		return -ENOEXEC;

	spin_lock(&kvm->mmu_lock);
	pgt = vcpu->arch.hw_mmu->pgt;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/*
	 * If we are not forced to use page mapping, check if we are
	 * backed by a THP and thus use block mapping if possible.
	 */
	if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
		if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
			vma_pagesize = fault_granule;
		else
			vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
								   hva, &pfn,
								   &fault_ipa);
	}

	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
		/* Check the VMM hasn't introduced a new VM_SHARED VMA */
		if (!shared)
			ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
		else
			ret = -EFAULT;
		if (ret)
			goto out_unlock;
	}

	if (writable)
		prot |= KVM_PGTABLE_PROT_W;

	if (exec_fault)
		prot |= KVM_PGTABLE_PROT_X;

	if (device)
		prot |= KVM_PGTABLE_PROT_DEVICE;
	else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		prot |= KVM_PGTABLE_PROT_X;

	/*
	 * Under the premise of getting a FSC_PERM fault, we just need to relax
	 * permissions only if vma_pagesize equals fault_granule. Otherwise,
	 * kvm_pgtable_stage2_map() should be called to change block size.
	 */
	if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
	} else {
		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
					     __pfn_to_phys(pfn), prot,
					     memcache);
	}

	/* Mark the page dirty only if the fault is handled successfully */
	if (writable && !ret) {
		kvm_set_pfn_dirty(pfn);
		mark_page_dirty_in_slot(kvm, memslot, gfn);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_clean(pfn);
	return ret != -EAGAIN ? ret : 0;
}

/* Resolve the access fault by making the page young again. */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	pte_t pte;
	kvm_pte_t kpte;
	struct kvm_s2_mmu *mmu;

	trace_kvm_access_fault(fault_ipa);

	spin_lock(&vcpu->kvm->mmu_lock);
	mmu = vcpu->arch.hw_mmu;
	kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
	spin_unlock(&vcpu->kvm->mmu_lock);

	pte = __pte(kpte);
	if (pte_valid(pte))
		kvm_set_pfn_accessed(pte_pfn(pte));
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);

	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

	/* Synchronous External Abort? */
	if (kvm_vcpu_abt_issea(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
		if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
			kvm_inject_vabt(vcpu);

		return 1;
	}

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
	    fault_status != FSC_ACCESS) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_esr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		/*
		 * The guest has put either its instructions or its page-tables
		 * somewhere it shouldn't have. Userspace won't be able to do
		 * anything about this (there's no syndrome for a start), so
		 * re-inject the abort back into the guest.
		 */
		if (is_iabt) {
			ret = -ENOEXEC;
			goto out;
		}

		if (kvm_vcpu_abt_iss1tw(vcpu)) {
			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory
		 * slot. But we can't find out if that is for a device,
		 * or if the guest is just being stupid. The only thing
		 * we know for sure is that this range cannot be cached.
		 *
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
		if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
			kvm_incr_pc(vcpu);
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));

	if (fault_status == FSC_ACCESS) {
		handle_access_fault(vcpu, fault_ipa);
		ret = 1;
		goto out_unlock;
	}

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out:
	if (ret == -ENOEXEC) {
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		ret = 1;
	}
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

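/*
 * MMU notifier callbacks, invoked by the generic KVM code when the host
 * changes or removes the userspace mappings backing guest memory.
 */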
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.mmu.pgt)
		return false;

	__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
			     (range->end - range->start) << PAGE_SHIFT,
			     range->may_block);

	return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_pfn_t pfn = pte_pfn(range->pte);
	int ret;

	if (!kvm->arch.mmu.pgt)
		return false;

	WARN_ON(range->end - range->start != 1);

	ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
	if (ret)
		return false;

	/*
	 * We've moved a page around, probably through CoW, so let's treat
	 * it just like a translation fault and the map handler will clean
	 * the cache to the PoC.
	 *
	 * The MMU notifiers will have unmapped a huge PMD before calling
	 * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
	 * therefore we never need to clear out a huge PMD through this
	 * calling path and a memcache is not required.
	 */
	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
			       PAGE_SIZE, __pfn_to_phys(pfn),
			       KVM_PGTABLE_PROT_R, NULL);

	return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	kvm_pte_t kpte;
	pte_t pte;

	if (!kvm->arch.mmu.pgt)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
					range->start << PAGE_SHIFT);
	pte = __pte(kpte);
	return pte_valid(pte) && pte_young(pte);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.mmu.pgt)
		return false;

	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
					   range->start << PAGE_SHIFT);
}

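/* Return the physical address of the root of the hyp stage-1 page tables. */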
phys_addr_t kvm_mmu_get_httbr(void)
{
	return __pa(hyp_pgtable->pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

static int kvm_map_idmap_text(void)
{
	unsigned long size = hyp_idmap_end - hyp_idmap_start;
	int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
					PAGE_HYP_EXEC);
	if (err)
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);

	return err;
}

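/*
 * Allocation and refcount callbacks backing kvm_hyp_mm_ops below, used by
 * the hyp page-table code set up in kvm_mmu_init().
 */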
static void *kvm_hyp_zalloc_page(void *arg)
{
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
	.zalloc_page		= kvm_hyp_zalloc_page,
	.get_page		= kvm_host_get_page,
	.put_page		= kvm_host_put_page,
	.phys_to_virt		= kvm_host_va,
	.virt_to_phys		= kvm_host_pa,
};

int kvm_mmu_init(u32 *hyp_va_bits)
{
	int err;

	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
	hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
	hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);

	/*
	 * We rely on the linker script to ensure at build time that the HYP
	 * init code does not cross a page boundary.
	 */
	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

	*hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
	kvm_debug("HYP VA range: %lx:%lx\n",
		  kern_hyp_va(PAGE_OFFSET),
		  kern_hyp_va((unsigned long)high_memory - 1));

	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
	    hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
		/*
		 * The idmap page is intersecting with the VA space,
		 * it is not safe to continue further.
		 */
		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
		err = -EINVAL;
		goto out;
	}

	hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
	if (!hyp_pgtable) {
		kvm_err("Hyp mode page-table not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
	if (err)
		goto out_free_pgtable;

	err = kvm_map_idmap_text();
	if (err)
		goto out_destroy_pgtable;

	io_map_base = hyp_idmap_start;
	return 0;

out_destroy_pgtable:
	kvm_pgtable_hyp_destroy(hyp_pgtable);
out_free_pgtable:
	kfree(hyp_pgtable);
	hyp_pgtable = NULL;
out:
	return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		/*
		 * If we're using initial-all-set, we don't need to write
		 * protect any pages because they're all reported as dirty.
		 * Huge pages and normal pages will be write protected gradually.
		 */
		if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
			kvm_mmu_wp_memory_region(kvm, mem->slot);
		}
	}
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	hva_t hva = mem->userspace_addr;
	hva_t reg_end = hva + mem->memory_size;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
			change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest IPA space.
	 */
	if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
		return -EFAULT;

	mmap_read_lock(current->mm);
	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * VM_SHARED mappings are not allowed with MTE to avoid races
		 * when updating the PG_mte_tagged page flag, see
		 * sanitise_mte_tags for more details.
		 */
		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
			ret = -EINVAL;
			break;
		}

		if (vma->vm_flags & VM_PFNMAP) {
			/* IO region dirty page logging not allowed */
			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				break;
			}
		}
		hva = min(reg_end, vma->vm_end);
	} while (hva < reg_end);

	mmap_read_unlock(current->mm);
	return ret;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_free_stage2_pgd(&kvm->arch.mmu);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = *vcpu_hcr(vcpu);

	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
	if (!(hcr & HCR_TVM)) {
		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
					vcpu_has_cache_enabled(vcpu));
		stage2_flush_vm(vcpu->kvm);
		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
	}
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM ops (until a S/W op) */
	if (now_enabled)
		*vcpu_hcr(vcpu) &= ~HCR_TVM;

	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}