/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * its->cmd_lock (mutex)
 *   its->its_lock (mutex)
 *     vgic_cpu->ap_list_lock
 *       kvm->lpi_list_lock
 *         vgic_irq->irq_lock
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */
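
/*
 * A minimal sketch of the drop-and-retake rule above, as done for real in
 * vgic_queue_irq_unlock() below:
 *
 *	spin_unlock(&irq->irq_lock);
 *	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 *	spin_lock(&irq->irq_lock);
 *
 * Anything sampled from the irq before dropping its lock must be re-checked
 * afterwards; vgic_queue_irq_unlock() does this by re-running
 * vgic_target_oracle() and retrying on a mismatch.
 */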

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount; the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* SPIs */
	if (intid <= VGIC_MAX_SPI)
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
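
/*
 * For reference, the INTID ranges dispatched above follow the GIC
 * architecture: 0-15 are SGIs and 16-31 are PPIs (both per-VCPU, hence the
 * private_irqs array), 32-1019 are SPIs, and LPIs start at 8192
 * (VGIC_MIN_LPI); anything in between is reserved, hence the WARN.
 */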

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock(&dist->lpi_list_lock);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock(&dist->lpi_list_lock);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock(&dist->lpi_list_lock);

	kfree(irq);
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}
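
/*
 * Note that on the GIC a lower numerical priority value means a more urgent
 * interrupt, so the ascending sort on the priority field above puts the most
 * urgent pending interrupts at the head of the ap_list.
 */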

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
{
	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}
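
/*
 * For example: asserting an already-high level-sensitive line is not a valid
 * injection (line_level == level), while every level == true on an
 * edge-triggered line counts as a new rising edge and latches the pending
 * state.
 */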

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu)
			kvm_vcpu_kick(vcpu);
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock(&irq->irq_lock);

	/* Someone may change the irq's state here; we re-check it below. */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	kvm_vcpu_kick(vcpu);

	return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive: true:  raise the input signal
 *			      false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	spin_lock(&irq->irq_lock);

	if (!vgic_validate_injection(irq, level)) {
		/* Nothing to see here, move along... */
		spin_unlock(&irq->irq_lock);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq);
	vgic_put_irq(kvm, irq);

	return 0;
}
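
/*
 * A purely illustrative, hypothetical caller (not part of this file): a
 * device model raising and later lowering a level-sensitive SPI, here
 * INTID 40:
 *
 *	kvm_vgic_inject_irq(kvm, 0, 40, true);
 *	... device condition cleared ...
 *	kvm_vgic_inject_irq(kvm, 0, 40, false);
 *
 * For private interrupts (intid < VGIC_NR_PRIVATE_IRQS) the cpuid argument
 * selects the target VCPU; for SPIs it is not used and delivery follows the
 * interrupt's configured target_vcpu.
 */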

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);

	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = true;
	irq->hwintid = phys_irq;

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = false;
	irq->hwintid = 0;

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

retry:
	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source)
			count += hweight8(irq->source);
		else
			count++;
		spin_unlock(&irq->irq_lock);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
		vgic_sort_ap_list(vcpu);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		if (unlikely(vgic_target_oracle(irq) != vcpu))
			goto next;

		/*
		 * If we get an SGI with multiple sources, try to get
		 * them in all at once.
		 */
		do {
			vgic_populate_lr(vcpu, irq, count++);
		} while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock.  There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing an additional synchronization mechanism doesn't
	 * change this.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	bool map_is_active;

	spin_lock(&irq->irq_lock);
	map_is_active = irq->hw && irq->active;
	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}