/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 *
 * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
 * macros (the entire kernel runs at EL2).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
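
/*
 * Worked example of the masking (illustrative only, assuming a kernel
 * configured with VA_BITS == 39, i.e. PAGE_OFFSET == 0xffffffc000000000):
 *
 *	kernel VA		0xffffffc000100000
 *	& HYP_PAGE_OFFSET_MASK	0x0000007fffffffff
 *	= HYP VA		0x0000004000100000
 */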

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm
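
/*
 * Illustrative use from EL2 assembly (a sketch only; the symbol name is
 * made up):
 *
 *	ldr	x0, =some_kernel_buffer
 *	kern_hyp_va x0		// x0 now holds the HYP alias of the buffer
 */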

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
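
/*
 * For an address in the kernel linear map this matches the masking done
 * by kern_hyp_va above: HYP_PAGE_OFFSET is simply PAGE_OFFSET with its
 * top bits (which are all ones for a kernel address) masked off.
 */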

/*
 * We currently only support a 40-bit IPA, i.e. a 1TB guest physical
 * address space.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

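/*
 * Stage-2 access permissions live in the S2AP field of the descriptor:
 * PTE_S2_RDONLY sets only the read bit, PTE_S2_RDWR sets both the read
 * and write bits, hence the mask-and-compare in the *_readonly helpers
 * below.
 */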
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

/*
 * Allocate fake pgd for the host kernel page table macros to work.
 * This is not used by the hardware and we have no alignment
 * requirement for this allocation.
 */
static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
{
	int i;
	pgd_t *pgd;

	if (!KVM_PREALLOC_LEVEL)
		return hwpgd;

	/*
	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
	 * the PMD and the kernel will use folded pud.
	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
	 * pages.
	 */

	pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
			GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return ERR_PTR(-ENOMEM);

	/* Plug the HW PGD into the fake one. */
	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
		if (KVM_PREALLOC_LEVEL == 1)
			pgd_populate(NULL, pgd + i,
				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
		else if (KVM_PREALLOC_LEVEL == 2)
			pud_populate(NULL, pud_offset(pgd, 0) + i,
				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
	}

	return pgd;
}

static inline void kvm_free_fake_pgd(pgd_t *pgd)
{
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(pgd);
}
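
/*
 * Minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	pgd_t *hwpgd = ...;	// hardware stage-2 PGD, suitably aligned
 *	pgd_t *pgd = kvm_setup_fake_pgd(hwpgd);
 *
 *	if (IS_ERR(pgd))
 *		return PTR_ERR(pgd);
 *	...
 *	kvm_free_fake_pgd(pgd);	// frees only the fake PGD, never hwpgd
 */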
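
/*
 * The stage-2 code takes a page reference for every entry it installs
 * in a table page (and drops one when an entry is cleared), so a table
 * page whose refcount is back at its initial value of 1 holds no entries.
 */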
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

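/*
 * Table levels that were pre-allocated together with the PGD (see
 * KVM_PREALLOC_LEVEL) are freed with it, not by the unmap path, so they
 * must never be reported empty: hence the KVM_PREALLOC_LEVEL checks below.
 */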
#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must be set */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
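
/*
 * Worked example (illustrative values only): with VA_BITS == 39 and the
 * HYP init code idmap'd at PA 0x8000000000, idmap_idx is
 * 0x8000000000 >> 39 == 1, so entry 1 of the merged PGD points to the
 * boot HYP tables.
 */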

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

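	/* A VMIDBits field value of 2 means 16-bit VMIDs are implemented */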
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */