/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

#define kvm_pmd_huge(_x)	(pmd_huge(_x))

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

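/*
 * Page-table page cache helpers.  mmu_topup_memory_cache() pre-allocates a
 * small pool of pages with a sleeping allocation, so that the fault handlers
 * below can pull pages out of the pool with mmu_memory_cache_alloc() while
 * holding kvm->mmu_lock, where sleeping is not allowed.
 */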
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

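/*
 * Every entry installed in a page-table page takes a reference on that page
 * (see the get_page()/put_page() pairs below), so a table page whose
 * page_count() has dropped back to 1 no longer contains any entries and can
 * be freed.
 */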
static bool page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pmd_t *pmd_table = pmd_offset(pud, 0);
		pud_clear(pud);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pmd_free(NULL, pmd_table);
	}
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	if (kvm_pmd_huge(*pmd)) {
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pte_t *pte_table = pte_offset_kernel(pmd, 0);
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pte_free_kernel(NULL, pte_table);
	}
	put_page(virt_to_page(pmd));
}

static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	}
}

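/*
 * unmap_range() walks the page tables rooted at @pgdp and removes every
 * mapping in [start, start + size), freeing intermediate tables (and
 * dropping their references) once they become empty.  It is used both for
 * the HYP page tables (kvm == NULL) and for the guest's stage-2 tables.
 */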
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = pud_addr_end(addr, end);
			continue;
		}

		if (pud_huge(*pud)) {
			/*
			 * If we are dealing with a huge pud, just clear it and
			 * move on.
			 */
			clear_pud_entry(kvm, pud, addr);
			addr = pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = pmd_addr_end(addr, end);
			continue;
		}

		if (!kvm_pmd_huge(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			clear_pte_entry(kvm, pte, addr);
			next = addr + PAGE_SIZE;
		}

		/*
		 * If the pmd entry is to be cleared, walk back up the ladder
		 */
		if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			next = pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	kfree(init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		kfree(hyp_pgd);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

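/*
 * The create_hyp_* helpers below populate the Hyp-mode page tables one level
 * at a time: __create_hyp_mappings() walks the pgd/pud entries, allocating
 * pmd and pte tables as needed, and hands each block down to
 * create_hyp_pmd_mappings() and create_hyp_pte_mappings().
 */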
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the first-level table, whose size is defined by S2_PGD_ORDER
 * (it can support either full 40-bit input addresses or be limited to 32-bit
 * input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;

	return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

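/*
 * stage2_get_pmd() returns the pmd entry covering @addr in the stage-2
 * tables, allocating a new pmd table from @cache if the pud entry is empty.
 * Callers without a cache (e.g. the MMU notifier handlers) get NULL back
 * instead of an allocation.
 */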
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return pmd_offset(pud, addr);
}

static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

	pmd = stage2_get_pmd(kvm, cache, addr);
	VM_BUG_ON(!pmd);

	/*
	 * Mapping in huge pages should only happen through a fault.  If a
	 * page is merged into a transparent huge page, the individual
	 * subpages of that huge page should be unmapped through MMU
	 * notifiers before we get here.
	 *
	 * Merging of CompoundPages is not supported; they should be split
	 * first, unmapped, merged, and mapped back in on demand.
	 */
	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));

	old_pmd = *pmd;
	kvm_set_pmd(pmd, *new_pmd);
	if (pmd_present(old_pmd))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pmd));
	return 0;
}

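/*
 * stage2_set_pte() installs a single stage-2 pte for @addr, allocating a pte
 * table from @cache when the pmd entry is empty.  With @iomap set, finding
 * an already-present pte is treated as an error so that device ranges are
 * never silently remapped.
 */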
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create stage-2 page table mapping - Level 1 */
	pmd = stage2_get_pmd(kvm, cache, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

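/*
 * user_mem_abort() handles a stage-2 fault on an IPA backed by a memslot:
 * it pins the backing page with gfn_to_pfn_prot(), uses a PMD-sized block
 * mapping when the VMA is backed by hugetlbfs, and installs the new stage-2
 * entry under kvm->mmu_lock, backing off (so the guest simply faults again)
 * if an MMU notifier invalidated the page in the meantime.
 */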
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (is_vm_hugetlb_page(vma)) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which means that either the
 * guest simply needs more memory and we must allocate an appropriate page, or
 * the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFSC=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

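/*
 * handle_hva_to_gpa() translates a host virtual address range into the guest
 * physical pages that map it, iterating over all memslots, and invokes
 * @handler on each page.  It is the common walker behind the MMU notifier
 * callbacks below (kvm_unmap_hva, kvm_unmap_hva_range, kvm_set_spte_hva).
 */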
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}


void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

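/*
 * kvm_mmu_init() sets up the Hyp-mode page tables: it locates the HYP init
 * code (copying it to a bounce page if it straddles a page boundary),
 * allocates the boot and runtime HYP pgds, and creates the identity and
 * trampoline mappings needed to install the hypervisor.
 */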
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = 	__create_hyp_mappings(boot_hyp_pgd,
				      hyp_idmap_start, hyp_idmap_end,
				      __phys_to_pfn(hyp_idmap_start),
				      PAGE_HYP);

	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = 	__create_hyp_mappings(boot_hyp_pgd,
				      TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				      __phys_to_pfn(hyp_idmap_start),
				      PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = 	__create_hyp_mappings(hyp_pgd,
				      TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				      __phys_to_pfn(hyp_idmap_start),
				      PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}