/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"

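/*
 * DIAGNOSE 0x10: the guest declares a page range as unused, so the host
 * may discard its backing.  The two prefix pages (the vcpu's lowcore)
 * are carved out of the range because they must stay resident.
 */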
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix  = vcpu->arch.sie_block->prefix;

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;

	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
	vcpu->stat.diagnose_10++;

	/* we checked for start > end above */
	if (end < prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(start, end, vcpu->arch.gmap);
	} else {
		if (start < prefix)
			gmap_discard(start, prefix, vcpu->arch.gmap);
		if (end >= prefix)
			gmap_discard(prefix + 2 * PAGE_SIZE,
				     end, vcpu->arch.gmap);
	}
	return 0;
}

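/*
 * DIAGNOSE 0x258: page-reference services.  Subcode 0 (TOKEN) establishes
 * the pseudo-page-fault handshake described in SC24-6084, subcode 1
 * (CANCEL) tears it down again.  rx and ry name the register operands
 * taken from the SIE instruction-parsing area (ipa).
 */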
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
	unsigned long hva_token = KVM_HVA_ERR_BAD;

	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &parm, vcpu->run->s.regs.gprs[rx], sizeof(parm)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed.  We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		hva_token = gfn_to_hva(vcpu->kvm, gpa_to_gfn(parm.token_addr));
		if (kvm_is_error_hva(hva_token))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * The specification allows already-pending tokens to survive
		 * the cancel, so to reduce code complexity we assume all
		 * outstanding tokens are already pending.
		 */
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
	 * If pfault handling was never established or has already been
	 * canceled, SC24-6084 requires us to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

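/* DIAGNOSE 0x44: voluntary time slice end; give up the host cpu. */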
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	kvm_vcpu_on_spin(vcpu);
	return 0;
}

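/*
 * DIAGNOSE 0x9c: directed time slice end.  The register operand holds
 * the id of the vcpu that should receive the donated time slice.
 */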
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tcpu;
	int tid;
	int i;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.diagnose_9c++;
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

	if (tid == vcpu->vcpu_id)
		return 0;

	kvm_for_each_vcpu(i, tcpu, kvm)
		if (tcpu->vcpu_id == tid) {
			kvm_vcpu_yield_to(tcpu);
			break;
		}

	return 0;
}

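/*
 * DIAGNOSE 0x308: IPL functions.  Subcode 3 requests a clearing reset,
 * subcode 4 a reset without clear; both are forwarded to userspace as a
 * KVM_EXIT_S390_RESET with the matching reset flags.
 */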
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
	  vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}

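/*
 * DIAGNOSE 0x500: KVM virtio hypercall.  Only the virtio-ccw notify
 * subfunction (gpr 1 == KVM_S390_VIRTIO_CCW_NOTIFY) is handled here;
 * anything else is left to userspace.
 */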
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}
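
/*
 * For illustration only, not used by this file: a guest driver would
 * trigger the notify hypercall above roughly as sketched below, with the
 * subchannel id in gpr 2, the virtqueue index in gpr 3 and the cookie in
 * gpr 4, matching the layout comment in __diag_virtio_hypercall.  The
 * real guest-side helper lives in the virtio-ccw driver and may differ
 * in detail.
 *
 *	register unsigned long nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
 *	register unsigned long schid asm("2") = subchannel_id;
 *	register unsigned long index asm("3") = queue_index;
 *	register long cookie asm("4") = old_cookie;
 *
 *	asm volatile ("diag 2,4,0x500\n"
 *		      : "+d" (schid)
 *		      : "d" (nr), "d" (index), "d" (cookie)
 *		      : "memory");
 */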

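/*
 * Dispatch on the diagnose function code.  DIAGNOSE is privileged, so a
 * guest running in problem state gets a privileged-operation exception
 * before any dispatching is done.
 */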
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, code);
	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}