intercept.c
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"

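/*
 * Emulate LCTLG: load the 64-bit control registers reg1 through reg3
 * from guest memory at the RSY-format effective address.  A misaligned
 * address raises a specification exception, a failed guest access an
 * addressing exception.
 */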
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

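/*
 * Emulate LCTL: like LCTLG above, but the operands are 32 bits wide and
 * replace only the lower halves of control registers reg1 through reg3;
 * the operand address must be word aligned.
 */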
static int handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

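/*
 * Instructions with major opcode 0xeb are dispatched further on their
 * minor opcode, i.e. the lowest byte of the ipb field.
 */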
static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x8a] = kvm_s390_handle_priv_eb,
};

static int handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

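/* Handlers for intercepted instructions, indexed by the first opcode byte. */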
static const intercept_handler_t instruction_handlers[256] = {
	[0x01] = kvm_s390_handle_01,
	[0x82] = kvm_s390_handle_lpsw,
	[0x83] = kvm_s390_handle_diag,
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_b2,
	[0xb7] = handle_lctl,
	[0xb9] = kvm_s390_handle_b9,
	[0xe5] = kvm_s390_handle_e5,
	[0xeb] = handle_eb,
};

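/*
 * Intercepts that need no in-kernel handling: account them in the vcpu
 * statistics, nothing else to do.
 */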
static int handle_noop(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case 0x0:
		vcpu->stat.exit_null++;
		break;
	case 0x10:
		vcpu->stat.exit_external_request++;
		break;
	case 0x14:
		vcpu->stat.exit_external_interrupt++;
		break;
	default:
		break; /* nothing */
	}
	return 0;
}

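/*
 * Handle a stop request: under the local interrupt lock, act on the
 * pending action bits (rerun the vcpu, mark it stopped, store its
 * status) and clear each bit once it has been processed.
 */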
static int handle_stop(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	vcpu->stat.exit_stop_request++;
	spin_lock_bh(&vcpu->arch.local_int.lock);

	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
		rc = SIE_INTERCEPT_RERUNVCPU;
		vcpu->run->exit_reason = KVM_EXIT_INTR;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
		atomic_set_mask(CPUSTAT_STOPPED,
				&vcpu->arch.sie_block->cpuflags);
		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
		rc = -EOPNOTSUPP;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
		/* store status must be called unlocked. Since local_int.lock
		 * only protects local_int.* and not guest memory we can give
		 * up the lock here */
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc >= 0)
			rc = -EOPNOTSUPP;
	} else
		spin_unlock_bh(&vcpu->arch.local_int.lock);
	return rc;
}

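/*
 * Validity reason 0x37 is resolved by faulting the two pages of the
 * guest prefix area back in; all other reasons are left unhandled and
 * reported with -EOPNOTSUPP.
 */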
static int handle_validity(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr;
	int viwhy = vcpu->arch.sie_block->ipb >> 16;
	int rc;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	if (viwhy == 0x37) {
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
			 PAGE_SIZE);
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			goto out;
		}
		vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
				    vcpu->arch.gmap);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = -EOPNOTSUPP;
			goto out;
		}
		rc = fault_in_pages_writeable((char __user *) vmaddr,
			 PAGE_SIZE);
		if (rc) {
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
			goto out;
		}
	} else
		rc = -EOPNOTSUPP;

out:
	if (rc)
		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
			   viwhy);
	return rc;
}

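/* Look up and run the handler for the intercepted instruction's opcode. */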
static int handle_instruction(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);
	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

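/* Re-inject the intercepted program interruption into the guest. */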
static int handle_prog(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_program_interruption++;
	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}

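/*
 * An instruction was intercepted together with a program interruption.
 * Handle both parts; if the instruction itself was not handled, restore
 * the instruction intercept code (0x04) before returning the error.
 */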
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
	int rc, rc2;

	vcpu->stat.exit_instr_and_program++;
	rc = handle_instruction(vcpu);
	rc2 = handle_prog(vcpu);

	if (rc == -EOPNOTSUPP)
		vcpu->arch.sie_block->icptcode = 0x04;
	if (rc)
		return rc;
	return rc2;
}

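/*
 * Top-level dispatch table, indexed by the SIE intercept code shifted
 * right by two (all handled intercept codes are multiples of four).
 */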
static const intercept_handler_t intercept_funcs[] = {
	[0x00 >> 2] = handle_noop,
	[0x04 >> 2] = handle_instruction,
	[0x08 >> 2] = handle_prog,
	[0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_noop,
	[0x18 >> 2] = handle_noop,
	[0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
};

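/*
 * Dispatch the current SIE intercept to its handler; malformed or
 * unknown intercept codes are reported as -EOPNOTSUPP.
 */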
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	intercept_handler_t func;
	u8 code = vcpu->arch.sie_block->icptcode;

	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
		return -EOPNOTSUPP;
	func = intercept_funcs[code >> 2];
	if (func)
		return func(vcpu);
	return -EOPNOTSUPP;
}