/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>
#include <asm/kvm_host.h>

#include <uapi/linux/psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

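/*
 * AFFINITY_MASK(level) clears every MPIDR affinity field below the given
 * level, e.g. AFFINITY_MASK(1) masks out Aff0 so that all CPUs within a
 * cluster compare equal. psci_affinity_mask() returns an empty mask for
 * the invalid levels above 3.
 */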
#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we make VCPU suspend emulation the
	 * same as WFI (Wait-for-interrupt) emulation.
	 *
	 * This means for KVM the wakeup events are interrupts and
	 * this is consistent with the intended use of StateID as
	 * described in section 5.4.1 of the PSCI v0.2 specification
	 * (ARM DEN 0022A).
	 *
	 * Further, we also treat a power-down request the same as a
	 * stand-by request, as per section 5.4.2 clause 3 of the
	 * PSCI v0.2 specification (ARM DEN 0022A). This means all
	 * suspend states for KVM will preserve the register state.
	 */
	kvm_vcpu_block(vcpu);

	return PSCI_RET_SUCCESS;
}

static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
}

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	struct swait_queue_head *wq;
	unsigned long cpu_id;
	unsigned long context_id;
	phys_addr_t target_pc;

	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
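	/*
	 * A 32-bit caller can only encode the lower 32 bits of the target
	 * MPIDR, so truncate cpu_id accordingly (~((u32) 0) widens to
	 * 0xffffffff when promoted to unsigned long).
	 */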
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

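	/* CPU_ON passes the entry point in r2/x2 and the context ID in r3/x3 */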
	target_pc = vcpu_get_reg(source_vcpu, 2);
	context_id = vcpu_get_reg(source_vcpu, 3);

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	vcpu_set_reg(vcpu, 0, context_id);
	vcpu->arch.power_off = false;
	smp_mb();		/* Make sure the above is visible */

	wq = kvm_arch_vcpu_wq(vcpu);
	swake_up(wq);

	return PSCI_RET_SUCCESS;
}

static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i, matching_cpus = 0;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

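	/*
	 * AFFINITY_INFO arguments: the target affinity is passed in r1/x1
	 * and the lowest affinity level in r2/x2.
	 */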
	target_affinity = vcpu_get_reg(vcpu, 1);
	lowest_affinity_level = vcpu_get_reg(vcpu, 2);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * If at least one VCPU matching target affinity is running
	 * then return ON, otherwise return OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if ((mpidr & target_affinity_mask) == target_affinity) {
			matching_cpus++;
			if (!tmp->arch.power_off)
				return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	if (!matching_cpus)
		return PSCI_RET_INVALID_PARAMS;

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when the
	 * actual request is made.  Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, let's make sure that VCPUs are not run
	 * after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		tmp->arch.power_off = true;
		kvm_vcpu_kick(tmp);
	}

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

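/*
 * PSCI v0.2 is only exposed to a guest if userspace opted in by setting
 * the KVM_ARM_VCPU_PSCI_0_2 feature when initializing the VCPU;
 * otherwise the guest gets the legacy KVM-specific v0.1 interface.
 */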
int kvm_psci_version(struct kvm_vcpu *vcpu)
{
	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
		return KVM_ARM_PSCI_0_2;

	return KVM_ARM_PSCI_0_1;
}

static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
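	/* The PSCI function ID is a 32-bit value even for 64-bit calls */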
	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;
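	/* 1 resumes the guest, 0 exits to user space (see kvm_psci_call) */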
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = 2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Trusted OS is MP hence does not require migration,
		 * or Trusted OS is not present.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally/deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request then the guest
		 * VCPU should see an internal failure from the PSCI return
		 * value. To achieve this, we preload r0 (or x0) with
		 * PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	vcpu_set_reg(vcpu, 0, val);
	return ret;
}

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	vcpu_set_reg(vcpu, 0, val);
	return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu)) {
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}