/*
 * VGICv3 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/* extract @num bytes at @offset bytes offset in data */
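/* e.g. extract_bytes(0x8877665544332211, 2, 2) == 0x4433 */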
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}

/* allows updates of any half of a 64-bit register (or the whole thing) */
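/*
 * e.g. a 4-byte access at offset 4 gives lower = 32 and upper = 63, so
 * only the top half of @reg is replaced by @val
 */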
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}

bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}

static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vcpu->kvm->arch.vgic.enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
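		/*
		 * ARE and DS are fixed to one: we only emulate a GIC with
		 * affinity routing enabled and a single security state.
		 */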
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		break;
	case GICD_TYPER:
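		/*
		 * TYPER[4:0] is ITLinesNumber: the GIC supports
		 * 32 * (ITLinesNumber + 1) interrupt IDs, e.g. with
		 * nr_spis = 224 this is (256 >> 5) - 1 = 7, i.e. 256 IDs.
		 */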
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

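		/*
		 * Enabling the distributor can make already pending
		 * interrupts deliverable, so kick the VCPUs to let them
		 * notice.
		 */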
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GICD_TYPER:
	case GICD_IIDR:
		return;
	}
}

static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}

static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return;

	spin_lock(&irq->irq_lock);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);
}

static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}


static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool was_enabled = vgic_cpu->lpis_enabled;

	if (!vgic_has_its(vcpu->kvm))
		return;

	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

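	/*
	 * vgic_enable_lpis() reads the LPI tables the guest programmed via
	 * GICR_PROPBASER/GICR_PENDBASER, so it is only called on the
	 * 0 -> 1 transition of the enable bit.
	 */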
	if (!was_enabled && vgic_cpu->lpis_enabled)
		vgic_enable_lpis(vcpu);
}

static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	int target_vcpu_id = vcpu->vcpu_id;
	u64 value;

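	/*
	 * GICR_TYPER carries the VCPU's affinity in bits [55:32] (Aff3 is
	 * RAZ here), its processor number in bits [23:8] and the Last bit
	 * for the redistributor of the highest-numbered VCPU.
	 */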
	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);
	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
		value |= GICR_TYPER_LAST;
	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	return extract_bytes(value, addr & 7, len);
}

static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}

static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
						  gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/*
	 * The pending state of an interrupt is latched in the pending_latch
	 * variable. Userspace saves and restores the pending state and
	 * line_level separately.
	 * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt
	 * for the handling of ISPENDR and ICPENDR.
	 */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->pending_latch)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					  gpa_t addr, unsigned int len,
					  unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		if (test_bit(i, &val)) {
			/*
			 * pending_latch is set irrespective of irq type
			 * (level or edge), so that restoring the pending
			 * state does not depend on the irq configuration
			 * having been restored first.
			 */
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq);
		} else {
			irq->pending_latch = false;
			spin_unlock(&irq->irq_lock);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}

/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}

/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_nC;
	}
}

u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}

#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))

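/*
 * Both sanitisers below clear the RES0 bits defined above and additionally
 * mask out address bits [51:48], truncating the base address to 48 bits.
 */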
static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;
	reg &= ~GENMASK_ULL(51, 48);

	return reg;
}

static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;
	reg &= ~GENMASK_ULL(51, 48);
	return reg;
}

static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}

static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

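	/*
	 * The register may be written as two 32-bit halves, potentially
	 * racing with another VCPU, so update it with a cmpxchg64() loop
	 * and sanitise each intermediate value.
	 */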
	do {
		old_propbaser = dist->propbaser;
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}

static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
}

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

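	/* As for GICR_PROPBASER, guard against racing (32-bit) writes. */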
	do {
		old_pendbaser = vgic_cpu->pendbaser;
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}

/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}

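/*
 * For example, instantiating the macro above with bpi = 1 (as for the
 * enable registers) makes the first 4 bytes (covering the 32 private
 * IRQs) RAZ/WI and maps the remaining 124 bytes to the SPIs.
 */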
static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}

int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address)
{
	struct kvm_vcpu *vcpu;
	int c, ret = 0;

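	/*
	 * Each VCPU's redistributor occupies two consecutive 64K frames:
	 * the redistributor registers at RD_base, followed by the SGI and
	 * PPI related registers at SGI_base.
	 */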
	kvm_for_each_vcpu(c, vcpu, kvm) {
		gpa_t rd_base = redist_base_address + c * SZ_64K * 2;
		gpa_t sgi_base = rd_base + SZ_64K;
		struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
		struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;

		kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
		rd_dev->base_addr = rd_base;
		rd_dev->iodev_type = IODEV_REDIST;
		rd_dev->regions = vgic_v3_rdbase_registers;
		rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
		rd_dev->redist_vcpu = vcpu;

		mutex_lock(&kvm->slots_lock);
		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
					      SZ_64K, &rd_dev->dev);
		mutex_unlock(&kvm->slots_lock);

		if (ret)
			break;

		kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
		sgi_dev->base_addr = sgi_base;
		sgi_dev->iodev_type = IODEV_REDIST;
		sgi_dev->regions = vgic_v3_sgibase_registers;
		sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
		sgi_dev->redist_vcpu = vcpu;

		mutex_lock(&kvm->slots_lock);
		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
					      SZ_64K, &sgi_dev->dev);
		mutex_unlock(&kvm->slots_lock);
		if (ret) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
						  &rd_dev->dev);
			break;
		}
	}

	if (ret) {
		/* The current c failed, so we start with the previous one. */
		for (c--; c >= 0; c--) {
			struct vgic_cpu *vgic_cpu;

			vcpu = kvm_get_vcpu(kvm, c);
			vgic_cpu = &vcpu->arch.vgic_cpu;
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
						  &vgic_cpu->rd_iodev.dev);
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
						  &vgic_cpu->sgi_iodev.dev);
		}
	}

	return ret;
}

int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		iodev.regions = vgic_v3_rdbase_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}
/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask ? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}

/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
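/*
 * e.g. SGI_AFFINITY_LEVEL(reg, 1) moves the Aff1 field of ICC_SGI1R_EL1
 * (bits [23:16]) down to bits [15:8], its position within an MPIDR value.
 */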

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1), CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all VCPUs except
 * the calling one.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the time we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		struct vgic_irq *irq;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

		spin_lock(&irq->irq_lock);
		irq->pending_latch = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rdbase_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers),
	};

	struct vgic_io_device sgi_dev = {
		.regions = vgic_v3_sgibase_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers),
	};

	/* SGI_base is the next 64K frame after RD_base */
	if (offset >= SZ_64K)
		return vgic_uaccess(vcpu, &sgi_dev, is_write, offset - SZ_64K,
				    val);
	else
		return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}