/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

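/*
 * SPX (SET PREFIX): the second operand is decoded from the base/displacement
 * fields in the IPB. The new prefix must be word aligned, and both 4K pages
 * of the prefix area (hence the 0x7fffe000 mask and the probes of address and
 * address + PAGE_SIZE below) must be backed by accessible guest memory before
 * the prefix register is updated.
 */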
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	/* get the value */
	if (get_guest_u32(vcpu, operand2, &address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
	return 0;
}

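/* STPX: store the current prefix register to a word-aligned operand. */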
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;
	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest_u32(vcpu, operand2, address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
	return 0;
}

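/*
 * STAP: store the 16-bit CPU address (here the vcpu id) to a
 * halfword-aligned operand.
 */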
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 useraddr;
	int rc;

	vcpu->stat.instruction_stap++;
	useraddr = disp2;
	if (base2)
		useraddr += vcpu->run->s.regs.gprs[base2];

	if (useraddr & 1) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
	return 0;
}

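/*
 * Storage key instructions (ISKE, RRBE and SSKE) are not emulated here;
 * the PSW is rewound by the 4-byte instruction length so the guest
 * simply retries the storage key operation.
 */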
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

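/*
 * No channel subsystem is emulated in the kernel; STSCH and CHSC are
 * answered with condition code 3 by rewriting the CC field (bits 44-45
 * of the 64-bit mask) in the guest PSW.
 */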
static int handle_stsch(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_stsch++;
	VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
	return 0;
}

static int handle_chsc(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_chsc++;
	VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
	return 0;
}

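/*
 * STFL: store the host facility list into the guest lowcore, masked down
 * to the facility bits that KVM can virtualize.
 */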
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits that we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc == -EFAULT)
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	else {
		VCPU_EVENT(vcpu, 5, "store facility list value %x",
			   facility_list);
		trace_kvm_s390_handle_stfl(vcpu, facility_list);
	}
	return 0;
}

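/*
 * STIDP: store the CPU id prepared at vcpu setup (stidp_data) to a
 * doubleword-aligned operand.
 */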
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;
	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	if (operand2 & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
	return 0;
}

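/*
 * Build the SYSIB 3.2.2 (hypervisor level) block: count the configured
 * VCPUs, shift any entries reported by the underlying hypervisor down by
 * one slot and describe this KVM guest in entry 0.
 */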
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

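/*
 * STSI: the function code and selectors come from gr0/gr1, the store
 * address from base/displacement. Function code 0 just reports the
 * highest supported level (3), function codes 1 and 2 are forwarded to
 * the host via stsi(), 3.2.2 is synthesized above and anything else is
 * answered with condition code 3.
 */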
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	unsigned long mem;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_mem;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}

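/* dispatch table indexed by the low byte of the B2xx opcode */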
static intercept_handler_t priv_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x34] = handle_stsch,
	[0x5f] = handle_chsc,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. We first check for
	 * the privileged ones that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace.
	 */
	handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

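/*
 * TPROT (SSE format): both base/displacement pairs are decoded from the
 * IPB. Operand 1 is the address to test; the low bits of the operand 2
 * address carry the access key (the 0xf0 check below).
 */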
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
	u64 address1 = disp1 + (base1 ? vcpu->run->s.regs.gprs[base1] : 0);
	u64 address2 = disp2 + (base2 ? vcpu->run->s.regs.gprs[base2] : 0);
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	/* we must resolve the address without holding the mmap semaphore.
	 * This is ok since the userspace hypervisor is not supposed to change
	 * the mapping while the guest queries the memory. Otherwise the guest
	 * might crash or get wrong info anyway. */
	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, user_address);
	if (!vma) {
		up_read(&current->mm->mmap_sem);
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

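/*
 * SCKPF (SET CLOCK PROGRAMMABLE FIELD): privileged; bits 48-63 of gr0 are
 * written to the guest TOD programmable register, bits 32-47 must be zero
 * or a specification exception is injected.
 */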
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

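/* dispatch table for 01xx instructions, indexed by the low opcode byte */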
static intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}