/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>

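/* Host ASID assigned on this CPU to the guest kernel mode address space */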
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, kern_mm);
}

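/* Host ASID assigned on this CPU to the guest user mode address space */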
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, user_mm);
}

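/**
 * kvm_mips_map_page() - Map a guest physical page into the host.
 * @kvm:	KVM pointer.
 * @gfn:	Guest frame number.
 *
 * Look up the host pfn backing @gfn and record it in the guest physical page
 * map (guest_pmap), unless the gfn is already mapped.
 *
 * Returns:	0 on success, or -EFAULT if no host page backs @gfn.
 */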
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/*
 * Handle a TLB miss on a guest KSEG0 address: map the even/odd pair of host
 * pages backing the faulting address with a single host TLB entry.
 *
 * XXXKYMA: Must be called with interrupts disabled.
 */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
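	/* The faulting page and its even/odd partner must both be in range */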
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
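	/* Align to the base of the double-page covered by one TLB entry */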
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	/* The ASID lookup and the TLB write must happen on the same CPU */
	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}

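/*
 * Handle a TLB miss in a guest mapped segment: map the pair of guest physical
 * pages referenced by the matching guest TLB entry into a single host TLB
 * entry, propagating the D and V bits from the guest entry.
 */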
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	gfn_t gfn0, gfn1;
	long tlb_lo[2];
	int ret;

	/* Local copy, so the commpage fixup below can't modify the guest TLB */
	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the guest
	 * TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
			VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
	if (gfn0 >= kvm->arch.guest_pmap_npages ||
	    gfn1 >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
			__func__, gfn0, gfn1, tlb->tlb_hi);
		kvm_mips_dump_guest_tlbs(vcpu);
		return -1;
	}

	if (kvm_mips_map_page(kvm, gfn0) < 0)
		return -1;

	if (kvm_mips_map_page(kvm, gfn1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn0];
	pfn1 = kvm->arch.guest_pmap[gfn1];

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[0] & ENTRYLO_D) |
		(tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[1] & ENTRYLO_D) |
		(tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	/* The ASID lookup and the TLB write must happen on the same CPU */
	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}

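/*
 * Allocate a new ASID for @mm on @cpu, starting a new ASID cycle (flushing
 * the TLB, and the icache if it is virtually tagged) when the ASID space
 * wraps around.
 */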
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}

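/*
 * Fetch the guest instruction at @opc: for mapped segments, fault the mapping
 * into the host TLB via the guest TLB if necessary and read through the guest
 * virtual address; for KSEG0, translate to a host physical address and read
 * through a temporary kernel mapping. Returns KVM_INVALID_INST on failure.
 */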
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	void *vaddr;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index])) {
				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, opc, index, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
		vaddr += paddr & ~PAGE_MASK;
		inst = *(u32 *)vaddr;
		kunmap_atomic(vaddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}