vgic-v2-sr.c
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

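/*
 * Save the GICH_ELRSR0/1 "empty LR" status bits into the shadow state.
 * ELRSR1 is only read when the implementation exposes more than 32 list
 * registers, avoiding an extra MMIO access in the common case.
 */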
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	u32 elrsr0, elrsr1;

	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(nr_lr > 32))
		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
	else
		elrsr1 = 0;

	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
}

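/*
 * Save the in-use list registers into the shadow copy and clear them in
 * hardware. LRs flagged as empty in ELRSR only have their state bits
 * cleared; reading them back would be pointless.
 */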
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int i;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	for (i = 0; i < used_lrs; i++) {
		if (cpu_if->vgic_elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	if (!base)
		return;

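	/*
	 * Only touch the GICH_* registers if interrupts were actually queued
	 * for this vcpu; otherwise just reset the shadow state so that all
	 * LRs read back as empty.
	 */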
	if (used_lrs) {
		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);

		save_elrsr(vcpu, base);
		save_lrs(vcpu, base);

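		/* Disable the virtual interface by clearing GICH_HCR */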
		writel_relaxed(0, base + GICH_HCR);
	} else {
		cpu_if->vgic_elrsr = ~0UL;
		cpu_if->vgic_apr = 0;
	}
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	int i;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	if (!base)
		return;

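	/*
	 * If no LRs are in use there is nothing to program; leave the
	 * virtual interface untouched.
	 */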
	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}

#ifdef CONFIG_ARM64
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				     guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address */
	fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa <  vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
		return -1;

	/* Not aligned? Don't bother */
	if (fault_ipa & 3)
		return -1;

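	/* Compute the HYP VA of the corresponding real GICV register */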
	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr  = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
	addr += fault_ipa - vgic->vgic_cpu_base;

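	/*
	 * Perform the access on behalf of the guest; vcpu_data_guest_to_host()
	 * and vcpu_data_host_to_guest() account for the guest's endianness.
	 */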
	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_data_guest_to_host(vcpu,
						   vcpu_get_reg(vcpu, rd),
						   sizeof(u32));
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
							       sizeof(u32)));
	}

	return 1;
}
#endif