/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kernel virtual machines on s390
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H

#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/seqlock.h>
#include <linux/module.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>
#include <asm/guarded_storage.h>

/* CPU slots in the basic (bsca) and extended (esca) system control areas. */
#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS 255
#define KVM_USER_MEM_SLOTS 32
32

33 34 35 36 37 38 39
/*
 * These seem to be used for allocating ->chip in the routing table,
 * which we don't use. 4096 is an out-of-thin-air value. If we need
 * to look at ->chip later on, we'll need to revisit this.
 */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096

/* Default halt-polling interval, in nanoseconds. */
#define KVM_HALT_POLL_NS_DEFAULT 50000
41

42
/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS	KVM_ARCH_REQ(0)
#define KVM_REQ_DISABLE_IBS	KVM_ARCH_REQ(1)
#define KVM_REQ_ICPT_OPEREXC	KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION  KVM_ARCH_REQ(4)
#define KVM_REQ_VSIE_RESTART	KVM_ARCH_REQ(5)

/* SIGP control byte: pending bit and source-CPU-number mask. */
#define SIGP_CTRL_C		0x80
#define SIGP_CTRL_SCN_MASK	0x3f
52

53 54 55 56 57 58 59
/* SIGP control field of a basic SCA entry (one byte). */
union bsca_sigp_ctrl {
	__u8 value;
	struct {
		__u8 c : 1;
		__u8 r : 1;
		__u8 scn : 6;
	};
};
61 62 63 64 65 66 67 68

/* SIGP control field of an extended SCA entry (two bytes, full-width scn). */
union esca_sigp_ctrl {
	__u16 value;
	struct {
		__u8 c : 1;
		__u8 reserved: 7;
		__u8 scn;
	};
};
70 71 72 73 74 75

/* Per-CPU entry of the extended system control area. */
struct esca_entry {
	union esca_sigp_ctrl sigp_ctrl;
	__u16   reserved1[3];
	__u64   sda;
	__u64   reserved2[6];
};
77 78

struct bsca_entry {
79
	__u8	reserved0;
80
	union bsca_sigp_ctrl	sigp_ctrl;
81
	__u16	reserved[3];
82 83
	__u64	sda;
	__u64	reserved2[2];
84
};
85

86 87 88 89 90 91 92 93
/*
 * IPTE interlock word at the start of the SCA.
 * k is the lock bit; kh/kg split the remaining bits into two counters
 * (NOTE(review): presumably host/guest interlock counts — confirm against
 * ipte_lock users).
 */
union ipte_control {
	unsigned long val;
	struct {
		unsigned long k  : 1;
		unsigned long kh : 31;
		unsigned long kg : 32;
	};
};
94

95
struct bsca_block {
96
	union ipte_control ipte_control;
97 98 99
	__u64	reserved[5];
	__u64	mcn;
	__u64	reserved2;
100
	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
101
};
102

103 104 105 106 107 108
/* Extended system control area: up to KVM_S390_ESCA_CPU_SLOTS CPUs. */
struct esca_block {
	union ipte_control ipte_control;
	__u64   reserved1[7];
	__u64   mcn[4];
	__u64   reserved2[20];
	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
};
110

111 112 113 114 115 116 117 118 119 120 121 122 123 124
/*
 * This struct is used to store some machine check info from lowcore
 * for machine checks that happen while the guest is running.
 * This info in host's lowcore might be overwritten by a second machine
 * check from host when host is in the machine check's high-level handling.
 * The size is 24 bytes.
 */
struct mcck_volatile_info {
	__u64 mcic;			/* machine-check interruption code */
	__u64 failing_storage_address;
	__u32 ext_damage_code;
	__u32 reserved;
};

125
/* Bits of the cpuflags field in the SIE control block (see sie_block). */
#define CPUSTAT_STOPPED    0x80000000
#define CPUSTAT_WAIT       0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT   0x04000000
#define CPUSTAT_IO_INT     0x02000000
#define CPUSTAT_EXT_INT    0x01000000
#define CPUSTAT_RUNNING    0x00800000
#define CPUSTAT_RETAINED   0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB    0x00010000
#define CPUSTAT_RRF        0x00008000
#define CPUSTAT_SLSV       0x00004000
#define CPUSTAT_SLSR       0x00002000
#define CPUSTAT_ZARCH      0x00000800
#define CPUSTAT_MCDS       0x00000100
#define CPUSTAT_KSS        0x00000200
#define CPUSTAT_SM         0x00000080
#define CPUSTAT_IBS        0x00000040
#define CPUSTAT_GED2       0x00000010
#define CPUSTAT_G          0x00000008
#define CPUSTAT_GED        0x00000004
#define CPUSTAT_J          0x00000002
#define CPUSTAT_P          0x00000001

149
struct kvm_s390_sie_block {
150
	atomic_t cpuflags;		/* 0x0000 */
151 152
	__u32 : 1;			/* 0x0004 */
	__u32 prefix : 18;
153 154
	__u32 : 1;
	__u32 ibc : 12;
155 156 157
	__u8	reserved08[4];		/* 0x0008 */
#define PROG_IN_SIE (1<<0)
	__u32	prog0c;			/* 0x000c */
158
	__u8	reserved10[16];		/* 0x0010 */
159 160
#define PROG_BLOCK_SIE	(1<<0)
#define PROG_REQUEST	(1<<1)
161 162
	atomic_t prog20;		/* 0x0020 */
	__u8	reserved24[4];		/* 0x0024 */
163 164 165
	__u64	cputm;			/* 0x0028 */
	__u64	ckc;			/* 0x0030 */
	__u64	epoch;			/* 0x0038 */
166
	__u32	svcc;			/* 0x0040 */
167
#define LCTL_CR0	0x8000
168
#define LCTL_CR6	0x0200
169 170 171
#define LCTL_CR9	0x0040
#define LCTL_CR10	0x0020
#define LCTL_CR11	0x0010
172
#define LCTL_CR14	0x0002
173 174
	__u16   lctl;			/* 0x0044 */
	__s16	icpua;			/* 0x0046 */
J
Janosch Frank 已提交
175
#define ICTL_OPEREXC	0x80000000
176 177 178 179 180 181
#define ICTL_PINT	0x20000000
#define ICTL_LPSW	0x00400000
#define ICTL_STCTL	0x00040000
#define ICTL_ISKE	0x00004000
#define ICTL_SSKE	0x00002000
#define ICTL_RRBE	0x00001000
182
#define ICTL_TPROT	0x00000200
183
	__u32	ictl;			/* 0x0048 */
184 185 186 187
#define ECA_CEI		0x80000000
#define ECA_IB		0x40000000
#define ECA_SIGPI	0x10000000
#define ECA_MVPGI	0x01000000
188
#define ECA_AIV		0x00200000
189 190
#define ECA_VX		0x00020000
#define ECA_PROTEXCI	0x00002000
191
#define ECA_APIE	0x00000008
192
#define ECA_SII		0x00000001
193
	__u32	eca;			/* 0x004c */
194 195 196
#define ICPT_INST	0x04
#define ICPT_PROGI	0x08
#define ICPT_INSTPROGI	0x0C
197
#define ICPT_EXTREQ	0x10
198
#define ICPT_EXTINT	0x14
199 200
#define ICPT_IOREQ	0x18
#define ICPT_WAIT	0x1c
201 202
#define ICPT_VALIDITY	0x20
#define ICPT_STOP	0x28
203 204 205
#define ICPT_OPEREXC	0x2C
#define ICPT_PARTEXEC	0x38
#define ICPT_IOINST	0x40
206
#define ICPT_KSS	0x5c
207
	__u8	icptcode;		/* 0x0050 */
208
	__u8	icptstatus;		/* 0x0051 */
209 210 211 212 213
	__u16	ihcpu;			/* 0x0052 */
	__u8	reserved54[2];		/* 0x0054 */
	__u16	ipa;			/* 0x0056 */
	__u32	ipb;			/* 0x0058 */
	__u32	scaoh;			/* 0x005c */
214 215
#define FPF_BPBC 	0x20
	__u8	fpf;			/* 0x0060 */
F
Fan Zhang 已提交
216
#define ECB_GS		0x40
217 218 219
#define ECB_TE		0x10
#define ECB_SRSI	0x04
#define ECB_HOSTPROTINT	0x02
220
	__u8	ecb;			/* 0x0061 */
221 222 223 224
#define ECB2_CMMA	0x80
#define ECB2_IEP	0x20
#define ECB2_PFMFI	0x08
#define ECB2_ESCA	0x04
225
	__u8    ecb2;                   /* 0x0062 */
226
#define ECB3_DEA 0x08
227 228
#define ECB3_AES 0x04
#define ECB3_RI  0x01
229
	__u8    ecb3;			/* 0x0063 */
230
	__u32	scaol;			/* 0x0064 */
231 232 233
	__u8	reserved68;		/* 0x0068 */
	__u8    epdx;			/* 0x0069 */
	__u8    reserved6a[2];		/* 0x006a */
234
	__u32	todpr;			/* 0x006c */
235
#define GISA_FORMAT1 0x00000001
236 237
	__u32	gd;			/* 0x0070 */
	__u8	reserved74[12];		/* 0x0074 */
238 239
	__u64	mso;			/* 0x0080 */
	__u64	msl;			/* 0x0088 */
240 241 242
	psw_t	gpsw;			/* 0x0090 */
	__u64	gg14;			/* 0x00a0 */
	__u64	gg15;			/* 0x00a8 */
243 244 245 246 247
	__u8	reservedb0[8];		/* 0x00b0 */
#define HPID_KVM	0x4
#define HPID_VSIE	0x5
	__u8	hpid;			/* 0x00b8 */
	__u8	reservedb9[11];		/* 0x00b9 */
248 249 250
	__u16	extcpuaddr;		/* 0x00c4 */
	__u16	eic;			/* 0x00c6 */
	__u32	reservedc8;		/* 0x00c8 */
251 252 253 254 255 256 257 258 259 260 261 262 263
	__u16	pgmilc;			/* 0x00cc */
	__u16	iprcc;			/* 0x00ce */
	__u32	dxc;			/* 0x00d0 */
	__u16	mcn;			/* 0x00d4 */
	__u8	perc;			/* 0x00d6 */
	__u8	peratmid;		/* 0x00d7 */
	__u64	peraddr;		/* 0x00d8 */
	__u8	eai;			/* 0x00e0 */
	__u8	peraid;			/* 0x00e1 */
	__u8	oai;			/* 0x00e2 */
	__u8	armid;			/* 0x00e3 */
	__u8	reservede4[4];		/* 0x00e4 */
	__u64	tecmc;			/* 0x00e8 */
264
	__u8	reservedf0[12];		/* 0x00f0 */
265
#define CRYCB_FORMAT_MASK 0x00000003
266
#define CRYCB_FORMAT0 0x00000000
267
#define CRYCB_FORMAT1 0x00000001
268
#define CRYCB_FORMAT2 0x00000003
269
	__u32	crycbd;			/* 0x00fc */
270 271
	__u64	gcr[16];		/* 0x0100 */
	__u64	gbea;			/* 0x0180 */
F
Fan Zhang 已提交
272 273 274
	__u8    reserved188[8];		/* 0x0188 */
	__u64   sdnxo;			/* 0x0190 */
	__u8    reserved198[8];		/* 0x0198 */
275
	__u32	fac;			/* 0x01a0 */
276 277
	__u8	reserved1a4[20];	/* 0x01a4 */
	__u64	cbrlo;			/* 0x01b8 */
278
	__u8	reserved1c0[8];		/* 0x01c0 */
279
#define ECD_HOSTREGMGMT	0x20000000
280
#define ECD_MEF		0x08000000
281
#define ECD_ETOKENF	0x02000000
282
#define ECD_ECC		0x00200000
283 284
	__u32	ecd;			/* 0x01c8 */
	__u8	reserved1cc[18];	/* 0x01cc */
285 286
	__u64	pp;			/* 0x01de */
	__u8	reserved1e6[2];		/* 0x01e6 */
287
	__u64	itdba;			/* 0x01e8 */
288
	__u64   riccbd;			/* 0x01f0 */
289
	__u64	gvrd;			/* 0x01f8 */
290 291
} __attribute__((packed));

292 293
/* Interception transaction diagnostic block (256 bytes, opaque payload). */
struct kvm_s390_itdb {
	__u8	data[256];
};
295 296 297

struct sie_page {
	struct kvm_s390_sie_block sie_block;
298 299
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	__u8 reserved218[1000];		/* 0x0218 */
300
	struct kvm_s390_itdb itdb;	/* 0x0600 */
301
	__u8 reserved700[2304];		/* 0x0700 */
302
};
303

304
struct kvm_vcpu_stat {
305 306 307
	u64 exit_userspace;
	u64 exit_null;
	u64 exit_external_request;
308
	u64 exit_io_request;
309 310 311 312 313 314 315 316
	u64 exit_external_interrupt;
	u64 exit_stop_request;
	u64 exit_validity;
	u64 exit_instruction;
	u64 exit_pei;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
317
	u64 halt_no_poll_steal;
318 319 320 321 322 323 324 325
	u64 halt_wakeup;
	u64 instruction_lctl;
	u64 instruction_lctlg;
	u64 instruction_stctl;
	u64 instruction_stctg;
	u64 exit_program_interruption;
	u64 exit_instr_and_program;
	u64 exit_operation_exception;
326 327
	u64 deliver_ckc;
	u64 deliver_cputm;
328 329 330
	u64 deliver_external_call;
	u64 deliver_emergency_signal;
	u64 deliver_service_signal;
331
	u64 deliver_virtio;
332 333 334
	u64 deliver_stop_signal;
	u64 deliver_prefix_signal;
	u64 deliver_restart_signal;
335 336
	u64 deliver_program;
	u64 deliver_io;
337
	u64 deliver_machine_check;
338
	u64 exit_wait_state;
339 340 341 342 343 344 345 346 347 348
	u64 inject_ckc;
	u64 inject_cputm;
	u64 inject_external_call;
	u64 inject_emergency_signal;
	u64 inject_mchk;
	u64 inject_pfault_init;
	u64 inject_program;
	u64 inject_restart;
	u64 inject_set_prefix;
	u64 inject_stop_signal;
349 350 351 352 353
	u64 instruction_epsw;
	u64 instruction_gs;
	u64 instruction_io_other;
	u64 instruction_lpsw;
	u64 instruction_lpswe;
354
	u64 instruction_pfmf;
355 356 357
	u64 instruction_ptff;
	u64 instruction_sck;
	u64 instruction_sckpf;
358 359 360 361
	u64 instruction_stidp;
	u64 instruction_spx;
	u64 instruction_stpx;
	u64 instruction_stap;
362 363 364 365
	u64 instruction_iske;
	u64 instruction_ri;
	u64 instruction_rrbe;
	u64 instruction_sske;
366 367 368
	u64 instruction_ipte_interlock;
	u64 instruction_stsi;
	u64 instruction_stfl;
369 370
	u64 instruction_tb;
	u64 instruction_tpi;
371
	u64 instruction_tprot;
372
	u64 instruction_tsch;
373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397
	u64 instruction_sie;
	u64 instruction_essa;
	u64 instruction_sthyi;
	u64 instruction_sigp_sense;
	u64 instruction_sigp_sense_running;
	u64 instruction_sigp_external_call;
	u64 instruction_sigp_emergency;
	u64 instruction_sigp_cond_emergency;
	u64 instruction_sigp_start;
	u64 instruction_sigp_stop;
	u64 instruction_sigp_stop_store_status;
	u64 instruction_sigp_store_status;
	u64 instruction_sigp_store_adtl_status;
	u64 instruction_sigp_arch;
	u64 instruction_sigp_prefix;
	u64 instruction_sigp_restart;
	u64 instruction_sigp_init_cpu_reset;
	u64 instruction_sigp_cpu_reset;
	u64 instruction_sigp_unknown;
	u64 diagnose_10;
	u64 diagnose_44;
	u64 diagnose_9c;
	u64 diagnose_258;
	u64 diagnose_308;
	u64 diagnose_500;
398
	u64 diagnose_other;
399 400
};

401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421
/* Program interruption codes (ilc/iprcc values). */
#define PGM_OPERATION			0x01
#define PGM_PRIVILEGED_OP		0x02
#define PGM_EXECUTE			0x03
#define PGM_PROTECTION			0x04
#define PGM_ADDRESSING			0x05
#define PGM_SPECIFICATION		0x06
#define PGM_DATA			0x07
#define PGM_FIXED_POINT_OVERFLOW	0x08
#define PGM_FIXED_POINT_DIVIDE		0x09
#define PGM_DECIMAL_OVERFLOW		0x0a
#define PGM_DECIMAL_DIVIDE		0x0b
#define PGM_HFP_EXPONENT_OVERFLOW	0x0c
#define PGM_HFP_EXPONENT_UNDERFLOW	0x0d
#define PGM_HFP_SIGNIFICANCE		0x0e
#define PGM_HFP_DIVIDE			0x0f
#define PGM_SEGMENT_TRANSLATION		0x10
#define PGM_PAGE_TRANSLATION		0x11
#define PGM_TRANSLATION_SPEC		0x12
#define PGM_SPECIAL_OPERATION		0x13
#define PGM_OPERAND			0x15
#define PGM_TRACE_TABEL			0x16
#define PGM_VECTOR_PROCESSING		0x1b
#define PGM_SPACE_SWITCH		0x1c
#define PGM_HFP_SQUARE_ROOT		0x1d
#define PGM_PC_TRANSLATION_SPEC		0x1f
#define PGM_AFX_TRANSLATION		0x20
#define PGM_ASX_TRANSLATION		0x21
#define PGM_LX_TRANSLATION		0x22
#define PGM_EX_TRANSLATION		0x23
#define PGM_PRIMARY_AUTHORITY		0x24
#define PGM_SECONDARY_AUTHORITY		0x25
#define PGM_LFX_TRANSLATION		0x26
#define PGM_LSX_TRANSLATION		0x27
#define PGM_ALET_SPECIFICATION		0x28
#define PGM_ALEN_TRANSLATION		0x29
#define PGM_ALE_SEQUENCE		0x2a
#define PGM_ASTE_VALIDITY		0x2b
#define PGM_ASTE_SEQUENCE		0x2c
#define PGM_EXTENDED_AUTHORITY		0x2d
#define PGM_LSTE_SEQUENCE		0x2e
#define PGM_ASTE_INSTANCE		0x2f
#define PGM_STACK_FULL			0x30
#define PGM_STACK_EMPTY			0x31
#define PGM_STACK_SPECIFICATION		0x32
#define PGM_STACK_TYPE			0x33
#define PGM_STACK_OPERATION		0x34
#define PGM_ASCE_TYPE			0x38
#define PGM_REGION_FIRST_TRANS		0x39
#define PGM_REGION_SECOND_TRANS		0x3a
#define PGM_REGION_THIRD_TRANS		0x3b
#define PGM_MONITOR			0x40
#define PGM_PER				0x80
#define PGM_CRYPTO_OPERATION		0x119
454

455
/* irq types in ascend order of priorities */
enum irq_types {
	IRQ_PEND_SET_PREFIX = 0,
	IRQ_PEND_RESTART,
	IRQ_PEND_SIGP_STOP,
	IRQ_PEND_IO_ISC_7,
	IRQ_PEND_IO_ISC_6,
	IRQ_PEND_IO_ISC_5,
	IRQ_PEND_IO_ISC_4,
	IRQ_PEND_IO_ISC_3,
	IRQ_PEND_IO_ISC_2,
	IRQ_PEND_IO_ISC_1,
	IRQ_PEND_IO_ISC_0,
	IRQ_PEND_VIRTIO,
	IRQ_PEND_PFAULT_DONE,
	IRQ_PEND_PFAULT_INIT,
	IRQ_PEND_EXT_HOST,
	IRQ_PEND_EXT_SERVICE,
	IRQ_PEND_EXT_TIMING,
	IRQ_PEND_EXT_CPU_TIMER,
	IRQ_PEND_EXT_CLOCK_COMP,
	IRQ_PEND_EXT_EXTERNAL,
	IRQ_PEND_EXT_EMERGENCY,
	IRQ_PEND_EXT_MALFUNC,
	IRQ_PEND_EXT_IRQ_KEY,
	IRQ_PEND_MCHK_REP,
	IRQ_PEND_PROG,
	IRQ_PEND_SVC,
	IRQ_PEND_MCHK_EX,
	IRQ_PEND_COUNT
};

487 488 489 490 491
/* We have 2M for virtio device descriptor pages. Smallest amount of
 * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
 * — upper bound on simultaneously queued floating virtio interrupts.
 */
#define KVM_S390_MAX_VIRTIO_IRQS 87381

492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532
/*
 * Repressible (non-floating) machine check interrupts
 * subclass bits in MCIC
 */
#define MCHK_EXTD_BIT 58
#define MCHK_DEGR_BIT 56
#define MCHK_WARN_BIT 55
#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
		       (1UL << MCHK_EXTD_BIT) | \
		       (1UL << MCHK_WARN_BIT))

/* Exigent machine check interrupts subclass bits in MCIC */
#define MCHK_SD_BIT 63
#define MCHK_PD_BIT 62
#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))

/* All external-interrupt bits of a pending_irqs word (see enum irq_types). */
#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY)    | \
			   (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
			   (1UL << IRQ_PEND_EXT_CPU_TIMER)  | \
			   (1UL << IRQ_PEND_EXT_MALFUNC)    | \
			   (1UL << IRQ_PEND_EXT_EMERGENCY)  | \
			   (1UL << IRQ_PEND_EXT_EXTERNAL)   | \
			   (1UL << IRQ_PEND_EXT_TIMING)     | \
			   (1UL << IRQ_PEND_EXT_HOST)       | \
			   (1UL << IRQ_PEND_EXT_SERVICE)    | \
			   (1UL << IRQ_PEND_VIRTIO)         | \
			   (1UL << IRQ_PEND_PFAULT_INIT)    | \
			   (1UL << IRQ_PEND_PFAULT_DONE))

/* All per-ISC I/O-interrupt bits of a pending_irqs word. */
#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
			  (1UL << IRQ_PEND_IO_ISC_1) | \
			  (1UL << IRQ_PEND_IO_ISC_2) | \
			  (1UL << IRQ_PEND_IO_ISC_3) | \
			  (1UL << IRQ_PEND_IO_ISC_4) | \
			  (1UL << IRQ_PEND_IO_ISC_5) | \
			  (1UL << IRQ_PEND_IO_ISC_6) | \
			  (1UL << IRQ_PEND_IO_ISC_7))

/* Machine-check bits of a pending_irqs word. */
#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
			    (1UL << IRQ_PEND_MCHK_EX))

533
struct kvm_s390_interrupt_info {
534 535 536
	struct list_head list;
	u64	type;
	union {
537 538 539
		struct kvm_s390_io_info io;
		struct kvm_s390_ext_info ext;
		struct kvm_s390_pgm_info pgm;
540
		struct kvm_s390_emerg_info emerg;
541
		struct kvm_s390_extcall_info extcall;
542
		struct kvm_s390_prefix_info prefix;
543
		struct kvm_s390_stop_info stop;
544
		struct kvm_s390_mchk_info mchk;
545 546 547
	};
};

548 549 550 551 552 553 554
struct kvm_s390_irq_payload {
	struct kvm_s390_io_info io;
	struct kvm_s390_ext_info ext;
	struct kvm_s390_pgm_info pgm;
	struct kvm_s390_emerg_info emerg;
	struct kvm_s390_extcall_info extcall;
	struct kvm_s390_prefix_info prefix;
555
	struct kvm_s390_stop_info stop;
556 557 558
	struct kvm_s390_mchk_info mchk;
};

559
struct kvm_s390_local_interrupt {
560
	spinlock_t lock;
561 562 563
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_irq_payload irq;
	unsigned long pending_irqs;
564 565
};

566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582
/* Indices into kvm_s390_float_interrupt->lists[] (one list per source). */
#define FIRQ_LIST_IO_ISC_0 0
#define FIRQ_LIST_IO_ISC_1 1
#define FIRQ_LIST_IO_ISC_2 2
#define FIRQ_LIST_IO_ISC_3 3
#define FIRQ_LIST_IO_ISC_4 4
#define FIRQ_LIST_IO_ISC_5 5
#define FIRQ_LIST_IO_ISC_6 6
#define FIRQ_LIST_IO_ISC_7 7
#define FIRQ_LIST_PFAULT   8
#define FIRQ_LIST_VIRTIO   9
#define FIRQ_LIST_COUNT   10
/* Indices into kvm_s390_float_interrupt->counters[]. */
#define FIRQ_CNTR_IO       0
#define FIRQ_CNTR_SERVICE  1
#define FIRQ_CNTR_VIRTIO   2
#define FIRQ_CNTR_PFAULT   3
#define FIRQ_MAX_COUNT     4

583 584 585 586 587 588
/* mask the AIS mode for a given ISC */
/* Note: argument is parenthesized so expression arguments expand safely. */
#define AIS_MODE_MASK(isc) (0x80 >> (isc))

#define KVM_S390_AIS_MODE_ALL    0
#define KVM_S390_AIS_MODE_SINGLE 1

589
struct kvm_s390_float_interrupt {
590
	unsigned long pending_irqs;
591
	spinlock_t lock;
592 593 594 595
	struct list_head lists[FIRQ_LIST_COUNT];
	int counters[FIRQ_MAX_COUNT];
	struct kvm_s390_mchk_info mchk;
	struct kvm_s390_ext_info srv_signal;
596
	int next_rr_cpu;
597 598 599
	struct mutex ais_lock;
	u8 simm;
	u8 nimm;
600 601
};

602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640
/* Hardware watchpoint descriptor used by the guest debug support. */
struct kvm_hw_wp_info_arch {
	unsigned long addr;
	unsigned long phys_addr;
	int len;
	char *old_data;	/* NOTE(review): presumably saved original bytes — confirm */
};

/* Hardware breakpoint descriptor. */
struct kvm_hw_bp_info_arch {
	unsigned long addr;
	int len;
};

/*
 * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
 * Further KVM_GUESTDBG flags which an be used from userspace can be found in
 * arch/s390/include/uapi/asm/kvm.h
 */
#define KVM_GUESTDBG_EXIT_PENDING 0x10000000

#define guestdbg_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
#define guestdbg_sstep_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
#define guestdbg_hw_bp_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))

/* Per-vcpu guest debug state: saved control regs and hw bp/wp arrays. */
struct kvm_guestdbg_info_arch {
	unsigned long cr0;
	unsigned long cr9;
	unsigned long cr10;
	unsigned long cr11;
	struct kvm_hw_bp_info_arch *hw_bp_info;
	struct kvm_hw_wp_info_arch *hw_wp_info;
	int nr_hw_bp;
	int nr_hw_wp;
	unsigned long last_bp;
};
641

642
struct kvm_vcpu_arch {
643
	struct kvm_s390_sie_block *sie_block;
644 645
	/* if vsie is active, currently executed shadow sie control block */
	struct kvm_s390_sie_block *vsie_block;
646
	unsigned int      host_acrs[NUM_ACRS];
F
Fan Zhang 已提交
647
	struct gs_cb      *host_gscb;
648
	struct fpu	  host_fpregs;
649
	struct kvm_s390_local_interrupt local_int;
650
	struct hrtimer    ckc_timer;
651
	struct kvm_s390_pgm_info pgm;
652
	struct gmap *gmap;
653 654
	/* backup location for the currently enabled gmap when scheduled out */
	struct gmap *enabled_gmap;
655
	struct kvm_guestdbg_info_arch guestdbg;
656 657 658
	unsigned long pfault_token;
	unsigned long pfault_select;
	unsigned long pfault_compare;
659
	bool cputm_enabled;
660 661 662 663 664 665 666
	/*
	 * The seqcount protects updates to cputm_start and sie_block.cputm,
	 * this way we can have non-blocking reads with consistent values.
	 * Only the owning VCPU thread (vcpu->cpu) is allowed to change these
	 * values and to start/stop/enable/disable cpu timer accounting.
	 */
	seqcount_t cputm_seqcount;
667
	__u64 cputm_start;
F
Fan Zhang 已提交
668
	bool gs_enabled;
669
	bool skey_enabled;
670 671 672
};

struct kvm_vm_stat {
673 674 675 676 677 678
	u64 inject_io;
	u64 inject_float_mchk;
	u64 inject_pfault_done;
	u64 inject_service_signal;
	u64 inject_virtio;
	u64 remote_tlb_flush;
679 680
};

681 682 683
/* No architecture-specific per-memslot data is needed on s390. */
struct kvm_arch_memory_slot {
};

684 685 686 687 688 689 690 691 692 693 694 695 696
struct s390_map_info {
	struct list_head list;
	__u64 guest_addr;
	__u64 addr;
	struct page *page;
};

struct s390_io_adapter {
	unsigned int id;
	int isc;
	bool maskable;
	bool masked;
	bool swap;
697
	bool suppressible;
698 699 700 701 702 703 704 705
	struct rw_semaphore maps_lock;
	struct list_head maps;
	atomic_t nr_maps;
};

#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256

706 707 708 709 710 711 712 713 714
/* maximum size of facilities and facility mask is 2k bytes */
#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
#define S390_ARCH_FAC_LIST_SIZE_U64 \
	(S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
#define S390_ARCH_FAC_MASK_SIZE_U64 \
	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))

struct kvm_s390_cpu_model {
715 716
	/* facility mask supported by kvm & hosting machine */
	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
717
	struct kvm_s390_vm_cpu_subfunc subfuncs;
718 719
	/* facility list requested by guest (in dma page) */
	__u64 *fac_list;
720
	u64 cpuid;
721
	unsigned short ibc;
722 723
};

724 725 726 727 728
/* Callback hook a module can register for vcpu interception handling
 * (owner keeps the providing module pinned while the hook is set). */
struct kvm_s390_module_hook {
	int (*hook)(struct kvm_vcpu *vcpu);
	struct module *owner;
};

729 730
struct kvm_s390_crypto {
	struct kvm_s390_crypto_cb *crycb;
731
	struct kvm_s390_module_hook *pqap_hook;
732
	__u32 crycbd;
733 734
	__u8 aes_kw;
	__u8 dea_kw;
735
	__u8 apie;
736 737
};

738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753
/* AP control block masks, format 0 (one 64-bit mask word each).
 * NOTE(review): apm/aqm/adm presumably select adapters/usage queues/domains
 * — confirm against the AP documentation. */
#define APCB0_MASK_SIZE 1
struct kvm_s390_apcb0 {
	__u64 apm[APCB0_MASK_SIZE];		/* 0x0000 */
	__u64 aqm[APCB0_MASK_SIZE];		/* 0x0008 */
	__u64 adm[APCB0_MASK_SIZE];		/* 0x0010 */
	__u64 reserved18;			/* 0x0018 */
};

/* AP control block masks, format 1 (four 64-bit mask words each). */
#define APCB1_MASK_SIZE 4
struct kvm_s390_apcb1 {
	__u64 apm[APCB1_MASK_SIZE];		/* 0x0000 */
	__u64 aqm[APCB1_MASK_SIZE];		/* 0x0020 */
	__u64 adm[APCB1_MASK_SIZE];		/* 0x0040 */
	__u64 reserved60[4];			/* 0x0060 */
};

754
struct kvm_s390_crypto_cb {
755 756 757 758 759
	struct kvm_s390_apcb0 apcb0;		/* 0x0000 */
	__u8   reserved20[0x0048 - 0x0020];	/* 0x0020 */
	__u8   dea_wrapping_key_mask[24];	/* 0x0048 */
	__u8   aes_wrapping_key_mask[32];	/* 0x0060 */
	struct kvm_s390_apcb1 apcb1;		/* 0x0080 */
760 761
};

762
struct kvm_s390_gisa {
763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793
	union {
		struct { /* common to all formats */
			u32 next_alert;
			u8  ipm;
			u8  reserved01[2];
			u8  iam;
		};
		struct { /* format 0 */
			u32 next_alert;
			u8  ipm;
			u8  reserved01;
			u8  : 6;
			u8  g : 1;
			u8  c : 1;
			u8  iam;
			u8  reserved02[4];
			u32 airq_count;
		} g0;
		struct { /* format 1 */
			u32 next_alert;
			u8  ipm;
			u8  simm;
			u8  nimm;
			u8  iam;
			u8  aism[8];
			u8  : 6;
			u8  g : 1;
			u8  c : 1;
			u8  reserved03[11];
			u32 airq_count;
		} g1;
794 795 796
		struct {
			u64 word[4];
		} u64;
797
	};
798 799
};

800 801 802 803 804 805 806 807 808
/* GIB — origin of the GISA alert list plus the host NISC
 * (NOTE(review): layout mirrors the hardware-defined block — confirm). */
struct kvm_s390_gib {
	u32 alert_list_origin;
	u32 reserved01;
	u8:5;
	u8  nisc:3;
	u8  reserved03[3];
	u32 reserved04[5];
};

809
/*
810 811
 * sie_page2 has to be allocated as DMA because fac_list, crycb and
 * gisa need 31bit addresses in the sie control block.
812 813 814 815
 */
struct sie_page2 {
	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
816
	struct kvm_s390_gisa gisa;			/* 0x0900 */
817 818
	struct kvm *kvm;				/* 0x0920 */
	u8 reserved928[0x1000 - 0x928];			/* 0x0928 */
819
};
820

821 822 823 824 825 826 827 828
/* Per-VM vSIE (nested SIE) state: cache of shadow SCB pages, keyed by
 * guest address in addr_to_page; mutex serializes page management. */
struct kvm_s390_vsie {
	struct mutex mutex;
	struct radix_tree_root addr_to_page;
	int page_count;
	int next;	/* next pages[] slot considered for reuse */
	struct page *pages[KVM_MAX_VCPUS];
};

829 830 831 832 833 834
/* Interruption alert mask bookkeeping: per-ISC reference counts behind
 * the composite mask, guarded by ref_lock. */
struct kvm_s390_gisa_iam {
	u8 mask;
	spinlock_t ref_lock;
	u32 ref_count[MAX_ISC + 1];
};

835 836
struct kvm_s390_gisa_interrupt {
	struct kvm_s390_gisa *origin;
837
	struct kvm_s390_gisa_iam alert;
838 839 840
	struct hrtimer timer;
	u64 expires;
	DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
841 842
};

843
struct kvm_arch{
844 845
	void *sca;
	int use_esca;
846
	rwlock_t sca_lock;
847
	debug_info_t *dbf;
848
	struct kvm_s390_float_interrupt float_int;
849
	struct kvm_device *flic;
850
	struct gmap *gmap;
851
	unsigned long mem_limit;
852
	int css_support;
853
	int use_irqchip;
854
	int use_cmma;
855
	int use_pfmfi;
856
	int use_skf;
857
	int user_cpu_state_ctrl;
858
	int user_sigp;
859
	int user_stsi;
860
	int user_instr0;
861
	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
862
	wait_queue_head_t ipte_wq;
863 864
	int ipte_lock_count;
	struct mutex ipte_mutex;
865
	spinlock_t start_stop_lock;
866
	struct sie_page2 *sie_page2;
867
	struct kvm_s390_cpu_model model;
868
	struct kvm_s390_crypto crypto;
869
	struct kvm_s390_vsie vsie;
870
	u8 epdx;
871
	u64 epoch;
872 873
	int migration_mode;
	atomic64_t cmma_dirty_pages;
874 875
	/* subset of available cpu features enabled by user space */
	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
876
	DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
877
	struct kvm_s390_gisa_interrupt gisa_int;
878 879
};

880 881 882 883 884 885 886 887
/* Error host-virtual-address sentinels; recognized via IS_ERR_VALUE(). */
#define KVM_HVA_ERR_BAD		(-1UL)
#define KVM_HVA_ERR_RO_BAD	(-2UL)

/* True if @addr is an error sentinel rather than a usable hva. */
static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903
/* Async page fault support: per-vcpu arch data is just the pfault token. */
#define ASYNC_PF_PER_VCPU	64
struct kvm_arch_async_pf {
	unsigned long pfault_token;
};

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);

904
void kvm_arch_crypto_clear_masks(struct kvm *kvm);
P
Pierre Morel 已提交
905 906
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm);
907

908
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
909
extern char sie_exit;
910

911 912 913
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);

914
static inline void kvm_arch_hardware_disable(void) {}
915 916 917 918 919
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
920
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
921 922 923
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
924 925
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
926

927 928
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);

929
#endif