// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

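/*
 * Trivial handlers implementing the architectural Read-As-Zero,
 * Read-As-One and Write-Ignore (RAZ/RAO/WI) register behaviours.
 */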
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

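/*
 * The group registers (GICD_IGROUPR) hold one bit of state per interrupt,
 * so one byte of register space covers eight INTIDs;
 * VGIC_ADDR_TO_INTID(addr, 1) maps the MMIO offset back to the first
 * interrupt affected by the access.
 */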
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

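/*
 * Note that reading the pending state requires the irq_lock:
 * irq_is_pending() combines the pending latch with the line level for
 * level-triggered interrupts, and both can change concurrently.
 */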
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection.  We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

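/*
 * Flipping the active state from MMIO is only safe when the interrupt
 * is guaranteed not to be resident in any VCPU's list registers; see
 * the comment ahead of vgic_change_active_prepare() below.
 */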
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations.  Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
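		/*
		 * GENMASK(7, 8 - VGIC_PRI_BITS) keeps the top VGIC_PRI_BITS
		 * bits of the written byte; with VGIC_PRI_BITS == 5 that is
		 * bits [7:3], so e.g. a guest write of 0x41 is stored and
		 * read back as 0x40.
		 */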
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

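/*
 * GICD_ICFGR holds two bits of state per interrupt, and only the upper
 * bit of each pair carries the configuration: 1 means edge-triggered,
 * 0 means level-sensitive. Hence the read handler below only ever sets
 * (2U << (i * 2)), and the write handler only tests bit (i * 2 + 1).
 */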
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general;
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

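/*
 * The line_level state is saved and restored 32 interrupts at a time,
 * one bit per interrupt, through the userspace device attribute
 * interface (the KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO group on GICv3).
 */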
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid a dependency on the VM
		 * restoring the irq config before the line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

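/*
 * Find the register handler entry for a given address offset. Note that
 * the regions array must be sorted by ascending reg_offset for the
 * bsearch() below to work.
 */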
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly.  Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

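/*
 * Userspace accesses that fall outside any registered region are
 * treated as RAZ/WI rather than reported as an error.
 */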
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

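/*
 * MMIO bus accessors, hooked up via kvm_io_gic_ops below: they convert
 * the bus-endian data and dispatch to the per-region handler matching
 * the iodev type.
 */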
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

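/*
 * Register the distributor frame on the kvm MMIO bus, so that guest
 * accesses in [dist_base_address, dist_base_address + len) are routed
 * to dispatch_mmio_read/write() above.
 */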
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}