// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"
#include "booke.h"
#include "e500.h"

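/*
 * One shadow ID mapping.  @val holds the shadow ID currently assigned on
 * some physical core; @pentry points back at that core's pcpu_id_table
 * slot.  local_sid_lookup() treats the mapping as valid only while both
 * sides still point at each other.
 */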
struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

/*
 * Allocate a free shadow ID and set up a valid SID mapping in the given entry.
 * A mapping is valid only when the vcpu_id_table and pcpu_id_table entries match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = __this_cpu_inc_return(pcpu_last_used_sid);
	if (sid < NUM_TIDS) {
		__this_cpu_write(pcpu_sids.entry[sid], entry);
		entry->val = sid;
		entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS would indicate a race, which disabling
	 * preemption prevents.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check if the given entry contains a valid shadow ID mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
	    entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
		return entry->val;
	return -1;
}

/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__this_cpu_write(pcpu_last_used_sid, 0);
	memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
	vcpu_e500->idt = NULL;
}

/* Map guest PID to shadow.
 * We use PID to hold the shadow of the current guest non-zero PID,
 * and PID1 to hold the shadow of the guest zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
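	/*
	 * Both lookups pass avoid_recursion=1: if kvmppc_e500_get_sid()
	 * has to flush and rebuild the ID tables, it must not call back
	 * into this function, which is already recomputing both shadow
	 * PIDs.
	 */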
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
 * This function first looks up whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
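			/*
			 * This core has exhausted its shadow IDs: flush
			 * the local TLB, reset the ID space, and retry
			 * the allocation with a clean slate.
			 */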
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}

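/* Shadow TID to use in a host TLB entry that maps the given guest entry. */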
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe)
{
	return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}

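/* Called when the guest writes its PID register; refresh the shadow PIDs. */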
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
                           struct kvm_book3e_206_tlb_entry *gtlbe)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts;
	int pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case we do a local invalidation of the
		 * specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * we invalidate the entire shadow PID.
		 */
		pid = local_sid_lookup(&idt->id[ts][tid][pr]);
		if (pid <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a 4K entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search the host TLB to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

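		/*
		 * MAS6 selects the PID/AS to search; tlbsx then loads the
		 * matching TLB entry, if any, into the MAS registers.
		 * Clearing MAS1[V] and writing the entry back with tlbwe
		 * invalidates it.
		 */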
		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kvmppc_e500_id_table_reset_all(vcpu_e500);
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_booke_vcpu_load(vcpu, cpu);

	/* The shadow PID may have expired on the local core */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
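	/* Give up SPE before this vcpu is scheduled out */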
#ifdef CONFIG_SPE
	if (vcpu->arch.shadow_msr & MSR_SPE)
		kvmppc_vcpu_disable_spe(vcpu);
#endif

	kvmppc_booke_vcpu_put(vcpu);
}

int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}

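/*
 * Seed the guest TLB with initial mappings: a large 256M mapping at
 * effective address 0, plus a 4K cache-inhibited mapping of the serial
 * port page used by the kernel boot wrapper.
 */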
static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_book3e_206_tlb_entry *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = get_entry(vcpu_e500, 1, 0);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = get_entry(vcpu_e500, 1, 1);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_e500_tlb_setup(vcpu_e500);

	/* Registers init */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500V2;

	return 0;
}

static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
	                       KVM_SREGS_E_PM;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

	kvmppc_get_sregs_ivor(vcpu, sregs);
	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
	return 0;
}

static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
			sregs->u.e.ivor_high[0];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
			sregs->u.e.ivor_high[1];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
			sregs->u.e.ivor_high[2];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}

static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static int kvmppc_core_vcpu_create_e500(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	int err;

	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
	vcpu_e500 = to_e500(vcpu);

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		return -ENOMEM;

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		goto uninit_id;

	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared) {
		err = -ENOMEM;
		goto uninit_tlb;
	}

	return 0;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
	kvmppc_e500_id_table_free(vcpu_e500);
	return err;
}

static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvmppc_e500_id_table_free(vcpu_e500);
}

static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
	return 0;
}

static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}

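/* These ops are registered through kvmppc_pr_ops in kvmppc_e500_init(). */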
static struct kvmppc_ops kvm_ops_e500 = {
	.get_sregs = kvmppc_core_get_sregs_e500,
	.set_sregs = kvmppc_core_set_sregs_e500,
	.get_one_reg = kvmppc_get_one_reg_e500,
	.set_one_reg = kvmppc_set_one_reg_e500,
	.vcpu_load   = kvmppc_core_vcpu_load_e500,
	.vcpu_put    = kvmppc_core_vcpu_put_e500,
	.vcpu_create = kvmppc_core_vcpu_create_e500,
	.vcpu_free   = kvmppc_core_vcpu_free_e500,
	.mmu_destroy  = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500,
	.destroy_vm = kvmppc_core_destroy_vm_e500,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

static int __init kvmppc_e500_init(void)
{
	int r, i;
	unsigned long ivor[3];
	/* Process remaining handlers above the generic first 16 */
	unsigned long *handler = &kvmppc_booke_handler_addr[16];
	unsigned long handler_len;
	unsigned long max_ivor = 0;

	r = kvmppc_core_check_processor_compat();
	if (r)
		goto err_out;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	/* copy extra E500 exception handlers */
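	/* IVOR32..34 are the e500 SPE/embedded FP exception vectors */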
	ivor[0] = mfspr(SPRN_IVOR32);
	ivor[1] = mfspr(SPRN_IVOR33);
	ivor[2] = mfspr(SPRN_IVOR34);
	for (i = 0; i < 3; i++) {
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}
	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_e500.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_e500;

err_out:
	return r;
}

static void __exit kvmppc_e500_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}

module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");