/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by userland
 *   ioctls, guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
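
/*
 * Illustrative sketch (not part of the original source): per vcpu, the
 * oracle described above boils down to ANDing the bitmaps, roughly:
 *
 *	pend_percpu = irq_pending & irq_enabled;                  (PPIs/SGIs)
 *	pend_shared = irq_pending & irq_enabled & irq_spi_target; (SPIs)
 *	if (pend_percpu || pend_shared)
 *		set_bit(vcpu_id, &dist->irq_pending_on_cpu);
 *
 * See compute_pending_for_cpu() below for the real implementation.
 */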

#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;

/*
 * struct vgic_bitmap contains unions that provide two views of
 * the same data. In one case it is an array of registers of
 * u32's, and in the other case it is a bitmap of unsigned
 * longs.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif

static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE);
	else
		return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}
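
/*
 * Worked example (illustrative): with REG_OFFSET_SWIZZLE == 1 (64-bit BE),
 * consecutive 32-bit register words swap places within each 64-bit long,
 * so the shared words at byte offsets 0x4, 0x8, 0xc and 0x10 map to
 * shared.reg indices 1, 0, 3 and 2 respectively ((offset - 1) ^ 1). On
 * little-endian or 32-bit kernels the swizzle is 0 and the mapping is
 * the identity.
 */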

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->percpu[cpuid].reg_ul);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->percpu[cpuid].reg_ul;
	} else {
		reg = x->shared.reg_ul;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	if (unlikely(cpuid >= VGIC_MAX_CPUS))
		return NULL;
	return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared.reg_ul;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	BUG_ON(offset > (VGIC_NR_IRQS / 4));
	if (offset < 8)
		return x->percpu[cpuid] + offset;
	else
		return x->shared + offset - 8;
}

#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}

/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
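
/*
 * Worked example (illustrative): a one-byte guest write of 0x01 at
 * offset 2 with ACCESS_WRITE_SETBIT yields word_offset = 16 and
 * mask = 0xff, so data = 0x00010000 and only bit 16 of *reg is set;
 * the other bytes of the register are left untouched.
 */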

static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg, orig;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg, orig;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
					  vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}
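
/*
 * Worked example (illustrative): if SPIs 32-35 target vcpus 0, 1, 0
 * and 2, irq_spi_cpu[] holds {0, 1, 0, 2} and the value returned for
 * the corresponding GICD_ITARGETSRn is 0x04010201 - one byte per
 * interrupt, with a single bit set in each byte to encode the target.
 */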

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupt targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
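
/*
 * Worked example (illustrative): vgic_cfg_expand(0x0003) returns
 * 0x0000000a (bits 1 and 3 set, i.e. the two lowest IRQs marked
 * edge-triggered in the 2-bits-per-IRQ hardware layout), and
 * vgic_cfg_compress(0x0000000a) gives back 0x0003.
 */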

/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: Pointer to the vcpu whose vgic_cpu struct holds the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}
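
/*
 * Illustrative layout note: each byte of GICD_{C,S}PENDSGIRn is the
 * source-CPU bitmask for one SGI. A read returning 0x00000502 for
 * SGIs 0-3 would mean SGI0 is pending from CPU1 (0x02) and SGI1 is
 * pending from CPUs 0 and 2 (0x05), with SGI2 and SGI3 idle.
 */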

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear the pending SGI sources on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		if (set) {
			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
		} else {
			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_NR_IRQS / 4,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};

static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
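
/*
 * Worked example (illustrative): a guest write of 0x00010003 to GICD_SGIR
 * decodes as sgi = 3, target_cpus = 0x01, mode = 0, i.e. "send SGI3 to
 * vcpu0 using the target list". Mode 1 broadcasts to all vcpus except the
 * sender, and mode 2 targets only the sender itself.
 */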

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < VGIC_NR_SHARED_IRQS);
}

/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, &dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, &dist->irq_pending_on_cpu);
		}
	}
}

static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}

static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
			       vgic->nr_lr);
	if (lr >= vgic->nr_lr)
1216 1217 1218 1219 1220 1221
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}

static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = dist->irq_sgi_sources[vcpu_id][irq];

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	dist->irq_sgi_sources[vcpu_id][irq] = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}

/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
	}
}

static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = (unsigned long *)&eisr;
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}

/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
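
/*
 * Illustrative truth table for the check above: an edge-triggered IRQ is
 * only injected on a rising edge (level == 1 while not already pending),
 * whereas a level-triggered IRQ is injected whenever the requested level
 * differs from the recorded line level, so both edges are propagated.
 */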

static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				  unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		} else {
			vgic_dist_irq_clear_pending(vcpu, irq_num);
		}
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, &dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (likely(vgic_initialized(kvm)) &&
	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}
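
/*
 * Usage sketch (illustrative, callers live outside this file): an
 * in-kernel device such as the arch timer raises its level-triggered
 * PPI with something like
 *
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, irq_number, true);
 *
 * where irq_number stands in for the device's interrupt number.
 */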

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	vgic_enable(vcpu);

	return 0;
}

/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.  Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *          address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}

/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};
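
/*
 * Illustrative userspace sketch (not part of this file): saving one CPU
 * interface register through the ranges above.  The attr field packs the
 * vcpu index above KVM_DEV_ARM_VGIC_CPUID_SHIFT and the register offset
 * in KVM_DEV_ARM_VGIC_OFFSET_MASK; vgic_fd and the vcpu index are
 * assumptions made for the example.
 *
 *	u32 pmr;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
 *		.attr	= ((u64)0 << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
 *			  GIC_CPU_PRIMASK,
 *		.addr	= (u64)(unsigned long)&pmr,
 *	};
 *
 *	if (ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		err(1, "KVM_GET_DEVICE_ATTR");
 */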

static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}

static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}

	}

	return r;
}

static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	}
	return -ENXIO;
}

static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm);
}

static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
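
/*
 * Illustrative userspace sketch (not part of this file): instantiating
 * the device backed by the ops above.  KVM_CREATE_DEVICE on the VM fd
 * returns a new device fd on which the KVM_{SET,GET,HAS}_DEVICE_ATTR
 * ioctls shown earlier operate; vm_fd is an assumption for the example.
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
 *		err(1, "KVM_CREATE_DEVICE");
 *	vgic_fd = cd.fd;
 */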

static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};
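
/*
 * Illustrative device tree fragment (not part of this file) of the kind
 * matched by vgic_ids above; the unit address and reg values are
 * assumptions borrowed from a typical Vexpress-like layout.  The GICH
 * and GICV regions and the maintenance interrupt are what the probe
 * functions consume.
 *
 *	interrupt-controller@2c001000 {
 *		compatible = "arm,cortex-a15-gic";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		reg = <0x2c001000 0x1000>,
 *		      <0x2c002000 0x2000>,
 *		      <0x2c004000 0x2000>,
 *		      <0x2c006000 0x2000>;
 *		interrupts = <1 9 0xf04>;
 *	};
 */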

int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
			  const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
				       KVM_DEV_TYPE_ARM_VGIC_V2);

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}