/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

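/*
 * The "misc" range covers GICD_CTLR, GICD_TYPER and GICD_IIDR.
 * GICD_TYPER encodes the number of supported interrupts as 32(N+1)
 * in bits [4:0] and the number of implemented CPU interfaces minus
 * one in bits [7:5].
 */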
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
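		/*
		 * Enabling a previously disabled distributor can make
		 * pending interrupts deliverable, so kick all VCPUs to
		 * resample their state.
		 */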
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}

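/*
 * GICD_SGIR: bits [3:0] hold the SGI number, bits [23:16] the CPU
 * target list and bits [25:24] the target list filter, which selects
 * between the explicit target list, all-but-self and self.
 */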
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		spin_lock(&irq->irq_lock);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}

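/*
 * GICD_ITARGETSR is byte-accessible: each byte is a bitmap of the CPU
 * interfaces the corresponding interrupt is routed to.
 */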
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		spin_lock(&irq->irq_lock);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

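/*
 * GICD_CPENDSGIR/GICD_SPENDSGIR expose one byte per SGI, with one
 * pending bit per possible source CPU, so SGI pending state is
 * tracked per source.
 */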
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq);
		} else {
			spin_unlock(&irq->irq_lock);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

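/* Architecture version reported in GICC_IIDR bits [19:16]. */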
#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		val = vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.ctlr = val;
		break;
	case GIC_CPU_PRIMASK:
		vmcr.pmr = val;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}

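/*
 * One entry per distributor register (or per-IRQ register range),
 * mapping each GICD_* offset to its handlers, its size and the
 * access widths it accepts.
 */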
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

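/*
 * GICv2 CPU interface registers, accessible from userspace only; the
 * active priority registers read-as-zero and ignore writes.
 */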
static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

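/*
 * Initialise the distributor as an in-kernel MMIO device; the region
 * it claims is one 4K page.
 */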
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}
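
/*
 * Check that the register offset described by @attr exists in the
 * selected register group, so userspace can probe which registers are
 * accessible before reading or writing them.
 */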
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

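/*
 * Userspace register accessors: wrap the relevant register table in a
 * transient io device and dispatch through the common vgic_uaccess()
 * helper.
 */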
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}