/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both the idmap
 * (the trampoline page used to initialize EL2) and our normal runtime
 * VA space at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __virt_to_phys(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages and a 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings: all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
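
/*
 * As a worked example (illustrative values, not taken from the source):
 * with VA_BITS == 39,
 *
 *	HYP_PAGE_OFFSET_HIGH_MASK == (1UL << 39) - 1 == 0x0000007fffffffff
 *	HYP_PAGE_OFFSET_LOW_MASK  == (1UL << 38) - 1 == 0x0000003fffffffff
 *
 * i.e. the low mask additionally clears bit (VA_BITS - 1), moving the
 * HYP VA range into the bottom half of the VA_BITS space.
 */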

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the mask is a strict subset of
 * the "high mask", hence performing the first mask for nothing.
 * Should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and     \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else_nop_endif
alternative_if ARM64_HYP_OFFSET_LOW
	and     \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_else_nop_endif
.endm
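
/*
 * Illustrative use (not part of the original file): convert a kernel
 * pointer held in x0 before dereferencing it at EL2:
 *
 *	kern_hyp_va	x0
 *	ldr		x1, [x0]
 */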

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v) 	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
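
/*
 * Illustrative use (not part of the original file): translate a kernel
 * pointer so that EL2 code can dereference it:
 *
 *	struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);
 */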

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
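/* i.e. each guest gets a 1TB (2^40 byte) physical address space. */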

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

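/*
 * Atomically clear PTE_S2_RDWR and set PTE_S2_RDONLY using a
 * load/store-exclusive loop, so that a concurrent update to the same
 * PTE is not lost.
 */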
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

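/*
 * A page-table page is reference-counted once per entry installed in
 * it, on top of the initial allocation reference, so a page count of 1
 * means the table is empty and can be freed.
 */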
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
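	/*
	 * SCTLR_EL1.M (bit 0, MMU enable) and SCTLR_EL1.C (bit 2, data
	 * cacheability) must both be set.
	 */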
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

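/*
 * Make a page about to be mapped into the guest coherent: clean the
 * data cache to the PoC if the guest may bypass its caches (caches
 * disabled or IPA mapped uncached), then invalidate the instruction
 * cache: by range for PIPT, entirely for VIPT, and not at all for an
 * ASID-tagged VIVT icache.
 */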
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

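/*
 * Clean and invalidate to the PoC the data cache lines covering the
 * memory mapped by a stage-2 pte, pmd or pud entry.
 */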
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

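	/* ID_AA64MMFR1_EL1.VMIDBits == 0b0010 indicates 16-bit VMID support. */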
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */