/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

/* handler type for a single SIE intercept reason */
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
24
/* Transactional Memory Execution related macros */
/* Macro arguments are parenthesized so any pointer expression may be passed. */
#define IS_TE_ENABLED(vcpu)	(((vcpu)->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu)	((*(char *)(vcpu)->arch.sie_block->itdba == TDB_FORMAT1))

29 30 31 32 33 34 35
extern debug_info_t *kvm_s390_dbf;
#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

36 37 38 39 40 41 42 43 44 45 46 47 48
#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)
49

50 51 52 53 54
static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

55
static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
56
{
57
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
58 59
}

60 61
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
62
	return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
63 64
}

/*
 * A user-controlled VM has no kernel-managed gmap; report whether this VM
 * runs in that mode (always false without CONFIG_KVM_S390_UCONTROL).
 */
static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	return kvm->arch.gmap == NULL;
#else
	return 0;
#endif
}
75

76 77 78 79 80 81
#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

82 83
static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
84 85
	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
		   prefix);
86
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
87
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
88
	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
89 90
}

91
static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
92
{
93 94
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
95

96 97 98
	if (ar)
		*ar = base2;

99 100 101 102
	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Decode both base/displacement operand pairs of an SSE-format instruction. */
static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      u8 *ar_b1, u8 *ar_b2)
{
	u32 ipb = vcpu->arch.sie_block->ipb;
	u32 base1 = (ipb & 0xf0000000) >> 28;
	u32 disp1 = (ipb & 0x0fff0000) >> 16;
	u32 base2 = (ipb & 0xf000) >> 12;
	u32 disp2 = ipb & 0x0fff;

	/* base register 0 means "no base", not gpr 0 */
	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

120 121
static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
122 123 124 125
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
126 127
}

128
static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
129
{
130 131
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
132
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
133 134 135
	/* The displacement is a 20bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2+=0xfff00000;
136

137 138 139
	if (ar)
		*ar = base2;

140
	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
141 142
}

143
static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
144
{
145 146
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
147

148 149 150
	if (ar)
		*ar = base2;

151 152 153
	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

154 155 156 157 158 159 160
/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

161
/* test availability of facility in a kvm instance */
162 163
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
164 165
	return __test_facility(nr, kvm->arch.model.fac_mask) &&
		__test_facility(nr, kvm->arch.model.fac_list);
166 167
}

168 169 170 171 172 173 174 175 176 177 178
static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}

179 180 181 182 183 184
static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
	WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
	return test_bit_inv(nr, kvm->arch.cpu_feat);
}

185 186 187 188 189 190
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

191
/* implemented in interrupt.c */
192
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
193
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
194
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
195
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
196
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
197
void kvm_s390_clear_float_irqs(struct kvm *kvm);
198 199 200
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
201
				      struct kvm_s390_irq *irq);
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
/* Inject the program check described by @pgm_info into @vcpu. */
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm = *pgm_info,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
/* Inject a program check with interruption code @code into @vcpu. */
static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = code,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
221
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
222
						    u64 isc_mask, u32 schid);
223 224
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
225
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
226

227
/* implemented in intercept.c */
228
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
229
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
230 231 232 233 234 235 236 237 238 239 240 241
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
/* Advance the guest PSW by @ilen bytes (a negative rewind). */
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
242 243
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
244 245
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}
246

247 248
int handle_sthyi(struct kvm_vcpu *vcpu);

249
/* implemented in priv.c */
T
Thomas Huth 已提交
250
int is_valid_psw(psw_t *psw);
F
Fan Zhang 已提交
251
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
252
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
F
Fan Zhang 已提交
253
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
254
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
255
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
256 257
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
258
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
259 260
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
261
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);
262

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

271 272
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
273
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
274 275

/* implemented in kvm-s390.c */
276 277
void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
				 const struct kvm_s390_vm_tod_clock *gtod);
278
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
279
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
280 281
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
282 283
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
284 285
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
286
void exit_sie(struct kvm_vcpu *vcpu);
287
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
288 289
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
290 291
unsigned long kvm_s390_fac_list_mask_size(void);
extern unsigned long kvm_s390_fac_list_mask[];
292 293
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
294

295 296 297
/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316
static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

/* Unblock every vcpu of the VM. */
static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

317 318 319 320 321 322 323 324 325 326
static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
	u64 rc;

	preempt_disable();
	rc = get_tod_clock_fast() + kvm->arch.epoch;
	preempt_enable();
	return rc;
}

327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359
/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	/* positive rc: access exception pending in vcpu->arch.pgm — inject it */
	if (rc > 0)
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	/* zero or internal error: pass through unchanged */
	return rc;
}

360 361 362
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
			struct kvm_s390_irq *s390irq);

363
/* implemented in interrupt.c */
364
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
365
int psw_extint_disabled(struct kvm_vcpu *vcpu);
366
void kvm_s390_destroy_adapters(struct kvm *kvm);
367
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
368
extern struct kvm_device_ops kvm_flic_ops;
369 370
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
371 372 373 374
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);
375

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

387 388 389
/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
390 391 392
	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

	return &sca->ipte_control;
393
}
394 395 396 397 398 399 400 401 402
static inline int kvm_s390_use_sca_entries(void)
{
	/*
	 * Without SIGP interpretation, only SRS interpretation (if available)
	 * might use the entries. By not setting the entries and keeping them
	 * invalid, hardware will not access them but intercept.
	 */
	return sclp.has_sigpif;
}
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info);
405
#endif