diag.c
/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"

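/*
 * DIAGNOSE 0x10: release pages.
 * The first operand register holds the start address, the second the
 * address of the last page to be released; both must be page aligned
 * and must not touch the low-core pages.  The prefix (lowcore swap)
 * pages need special handling, see below.
 */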
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix  = kvm_s390_get_prefix(vcpu);

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
	vcpu->stat.diagnose_10++;

	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);

	/*
	 * We checked for start >= end above, so let's check for the
	 * fast path (no prefix swap page involved).
	 */
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(vcpu->arch.gmap, start, end);
	} else {
		/*
		 * This is the slow path.  gmap_discard checks start against
		 * end itself, so let's split this into before-prefix, prefix
		 * and after-prefix ranges and let gmap_discard turn some of
		 * these calls into NOPs.
		 */
		gmap_discard(vcpu->arch.gmap, start, prefix);
		if (start <= prefix)
			gmap_discard(vcpu->arch.gmap, 0, 4096);
		if (end > prefix + 4096)
			gmap_discard(vcpu->arch.gmap, 4096, 8192);
		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
	}
	return 0;
}

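/*
 * DIAGNOSE 0x258: page-reference services.
 * Subcode 0 (TOKEN) establishes the pseudo-page-fault handshake used
 * for asynchronous page faults, subcode 1 (CANCEL) tears it down.
 * Return codes in ry follow SC24-6084.
 */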
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
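	/*
	 * Parameter block for DIAG 0x258 as read from guest memory at the
	 * address in rx (cf. SC24-6084).
	 */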
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	vcpu->stat.diagnose_258++;
	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed.  We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * The specification allows already pending tokens to survive
		 * the cancel, so to reduce code complexity we assume that all
		 * outstanding tokens are already pending.
		 */
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If pfault handling was not established or is already
		 * canceled, SC24-6084 requests that decimal 4 be returned.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

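/* DIAGNOSE 0x44: voluntary time slice end - just yield the cpu. */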
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	kvm_vcpu_on_spin(vcpu);
	return 0;
}

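/*
 * DIAGNOSE 0x9c: time slice end directed.
 * The guest names a target vcpu in the first operand register; yield
 * to that vcpu unless it is the caller itself.
 */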
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tcpu;
	int tid;
	int i;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.diagnose_9c++;
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

	if (tid == vcpu->vcpu_id)
		return 0;

	kvm_for_each_vcpu(i, tcpu, kvm)
		if (tcpu->vcpu_id == tid) {
			kvm_vcpu_yield_to(tcpu);
			break;
		}

	return 0;
}

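/*
 * DIAGNOSE 0x308: IPL functions.
 * Subcode 3 requests a reset with memory clearing, subcode 4 one
 * without; the actual resets and the reipl are left to userspace,
 * which is notified via KVM_EXIT_S390_RESET.
 */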
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
	vcpu->stat.diagnose_308++;
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}

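/*
 * DIAGNOSE 0x500: KVM hypercall, used by virtio-ccw to notify the host
 * about queue activity.  The notification is forwarded to a matching
 * ioeventfd on the virtio-ccw notify bus; if none is registered, the
 * diagnose is handed to userspace via -EOPNOTSUPP.
 */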
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->stat.diagnose_500++;
	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}

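/*
 * Entry point for the DIAGNOSE intercept.  The function code comes from
 * the second-operand address; DIAGNOSE is privileged, so a guest in
 * problem state gets a privileged-operation exception instead.
 */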
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, code);
	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}