/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kernel virtual machines on s390
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */


#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H

#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/seqlock.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>
#include <asm/guarded_storage.h>

#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS 255
#define KVM_USER_MEM_SLOTS 32

/*
 * These seem to be used for allocating ->chip in the routing table,
 * which we don't use. 4096 is an out-of-thin-air value. If we need
 * to look at ->chip later on, we'll need to revisit this.
 */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
#define KVM_HALT_POLL_NS_DEFAULT 80000

/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS	KVM_ARCH_REQ(0)
#define KVM_REQ_DISABLE_IBS	KVM_ARCH_REQ(1)
#define KVM_REQ_ICPT_OPEREXC	KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION  KVM_ARCH_REQ(4)

#define SIGP_CTRL_C		0x80
#define SIGP_CTRL_SCN_MASK	0x3f

union bsca_sigp_ctrl {
	__u8 value;
	struct {
		__u8 c : 1;
		__u8 r : 1;
		__u8 scn : 6;
	};
};

union esca_sigp_ctrl {
	__u16 value;
	struct {
		__u8 c : 1;
		__u8 reserved: 7;
		__u8 scn;
	};
};

struct esca_entry {
	union esca_sigp_ctrl sigp_ctrl;
	__u16   reserved1[3];
	__u64   sda;
	__u64   reserved2[6];
};

struct bsca_entry {
	__u8	reserved0;
	union bsca_sigp_ctrl	sigp_ctrl;
	__u16	reserved[3];
	__u64	sda;
	__u64	reserved2[2];
};

union ipte_control {
	unsigned long val;
	struct {
		unsigned long k  : 1;
		unsigned long kh : 31;
		unsigned long kg : 32;
	};
};

struct bsca_block {
	union ipte_control ipte_control;
	__u64	reserved[5];
	__u64	mcn;
	__u64	reserved2;
	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
};

struct esca_block {
	union ipte_control ipte_control;
	__u64   reserved1[7];
	__u64   mcn[4];
	__u64   reserved2[20];
	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
};

/*
 * This struct is used to store some machine check info from lowcore
 * for machine checks that happen while the guest is running.
 * This info in the host's lowcore might be overwritten by a second machine
 * check from the host while the host is in the machine check's high-level
 * handling.
 * The size is 24 bytes.
 */
struct mcck_volatile_info {
	__u64 mcic;
	__u64 failing_storage_address;
	__u32 ext_damage_code;
	__u32 reserved;
};

#define CPUSTAT_STOPPED    0x80000000
#define CPUSTAT_WAIT       0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT   0x04000000
#define CPUSTAT_IO_INT     0x02000000
#define CPUSTAT_EXT_INT    0x01000000
#define CPUSTAT_RUNNING    0x00800000
#define CPUSTAT_RETAINED   0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB    0x00010000
#define CPUSTAT_RRF        0x00008000
#define CPUSTAT_SLSV       0x00004000
#define CPUSTAT_SLSR       0x00002000
#define CPUSTAT_ZARCH      0x00000800
#define CPUSTAT_MCDS       0x00000100
#define CPUSTAT_KSS        0x00000200
#define CPUSTAT_SM         0x00000080
#define CPUSTAT_IBS        0x00000040
#define CPUSTAT_GED2       0x00000010
#define CPUSTAT_G          0x00000008
#define CPUSTAT_GED        0x00000004
#define CPUSTAT_J          0x00000002
#define CPUSTAT_P          0x00000001

struct kvm_s390_sie_block {
	atomic_t cpuflags;		/* 0x0000 */
	__u32 : 1;			/* 0x0004 */
	__u32 prefix : 18;
	__u32 : 1;
	__u32 ibc : 12;
	__u8	reserved08[4];		/* 0x0008 */
#define PROG_IN_SIE (1<<0)
	__u32	prog0c;			/* 0x000c */
	__u8	reserved10[16];		/* 0x0010 */
#define PROG_BLOCK_SIE	(1<<0)
#define PROG_REQUEST	(1<<1)
	atomic_t prog20;		/* 0x0020 */
	__u8	reserved24[4];		/* 0x0024 */
	__u64	cputm;			/* 0x0028 */
	__u64	ckc;			/* 0x0030 */
	__u64	epoch;			/* 0x0038 */
	__u32	svcc;			/* 0x0040 */
#define LCTL_CR0	0x8000
#define LCTL_CR6	0x0200
#define LCTL_CR9	0x0040
#define LCTL_CR10	0x0020
#define LCTL_CR11	0x0010
#define LCTL_CR14	0x0002
	__u16   lctl;			/* 0x0044 */
	__s16	icpua;			/* 0x0046 */
#define ICTL_OPEREXC	0x80000000
#define ICTL_PINT	0x20000000
#define ICTL_LPSW	0x00400000
#define ICTL_STCTL	0x00040000
#define ICTL_ISKE	0x00004000
#define ICTL_SSKE	0x00002000
#define ICTL_RRBE	0x00001000
#define ICTL_TPROT	0x00000200
	__u32	ictl;			/* 0x0048 */
#define ECA_CEI		0x80000000
#define ECA_IB		0x40000000
#define ECA_SIGPI	0x10000000
#define ECA_MVPGI	0x01000000
#define ECA_VX		0x00020000
#define ECA_PROTEXCI	0x00002000
#define ECA_SII		0x00000001
	__u32	eca;			/* 0x004c */
#define ICPT_INST	0x04
#define ICPT_PROGI	0x08
#define ICPT_INSTPROGI	0x0C
#define ICPT_EXTREQ	0x10
#define ICPT_EXTINT	0x14
#define ICPT_IOREQ	0x18
#define ICPT_WAIT	0x1c
#define ICPT_VALIDITY	0x20
#define ICPT_STOP	0x28
#define ICPT_OPEREXC	0x2C
#define ICPT_PARTEXEC	0x38
#define ICPT_IOINST	0x40
#define ICPT_KSS	0x5c
	__u8	icptcode;		/* 0x0050 */
	__u8	icptstatus;		/* 0x0051 */
	__u16	ihcpu;			/* 0x0052 */
	__u8	reserved54[2];		/* 0x0054 */
	__u16	ipa;			/* 0x0056 */
	__u32	ipb;			/* 0x0058 */
	__u32	scaoh;			/* 0x005c */
	__u8	reserved60;		/* 0x0060 */
#define ECB_GS		0x40
#define ECB_TE		0x10
#define ECB_SRSI	0x04
#define ECB_HOSTPROTINT	0x02
	__u8	ecb;			/* 0x0061 */
#define ECB2_CMMA	0x80
#define ECB2_IEP	0x20
#define ECB2_PFMFI	0x08
#define ECB2_ESCA	0x04
	__u8    ecb2;                   /* 0x0062 */
#define ECB3_DEA 0x08
#define ECB3_AES 0x04
#define ECB3_RI  0x01
	__u8    ecb3;			/* 0x0063 */
	__u32	scaol;			/* 0x0064 */
	__u8	reserved68;		/* 0x0068 */
	__u8    epdx;			/* 0x0069 */
	__u8    reserved6a[2];		/* 0x006a */
	__u32	todpr;			/* 0x006c */
	__u8	reserved70[16];		/* 0x0070 */
	__u64	mso;			/* 0x0080 */
	__u64	msl;			/* 0x0088 */
	psw_t	gpsw;			/* 0x0090 */
	__u64	gg14;			/* 0x00a0 */
	__u64	gg15;			/* 0x00a8 */
	__u8	reservedb0[20];		/* 0x00b0 */
	__u16	extcpuaddr;		/* 0x00c4 */
	__u16	eic;			/* 0x00c6 */
	__u32	reservedc8;		/* 0x00c8 */
	__u16	pgmilc;			/* 0x00cc */
	__u16	iprcc;			/* 0x00ce */
	__u32	dxc;			/* 0x00d0 */
	__u16	mcn;			/* 0x00d4 */
	__u8	perc;			/* 0x00d6 */
	__u8	peratmid;		/* 0x00d7 */
	__u64	peraddr;		/* 0x00d8 */
	__u8	eai;			/* 0x00e0 */
	__u8	peraid;			/* 0x00e1 */
	__u8	oai;			/* 0x00e2 */
	__u8	armid;			/* 0x00e3 */
	__u8	reservede4[4];		/* 0x00e4 */
	__u64	tecmc;			/* 0x00e8 */
	__u8	reservedf0[12];		/* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
	__u32	crycbd;			/* 0x00fc */
	__u64	gcr[16];		/* 0x0100 */
	__u64	gbea;			/* 0x0180 */
	__u8    reserved188[8];		/* 0x0188 */
	__u64   sdnxo;			/* 0x0190 */
	__u8    reserved198[8];		/* 0x0198 */
	__u32	fac;			/* 0x01a0 */
	__u8	reserved1a4[20];	/* 0x01a4 */
	__u64	cbrlo;			/* 0x01b8 */
	__u8	reserved1c0[8];		/* 0x01c0 */
#define ECD_HOSTREGMGMT	0x20000000
#define ECD_MEF		0x08000000
	__u32	ecd;			/* 0x01c8 */
	__u8	reserved1cc[18];	/* 0x01cc */
	__u64	pp;			/* 0x01de */
	__u8	reserved1e6[2];		/* 0x01e6 */
	__u64	itdba;			/* 0x01e8 */
	__u64   riccbd;			/* 0x01f0 */
	__u64	gvrd;			/* 0x01f8 */
} __attribute__((packed));

struct kvm_s390_itdb {
	__u8	data[256];
};

struct sie_page {
	struct kvm_s390_sie_block sie_block;
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	__u8 reserved218[1000];		/* 0x0218 */
	struct kvm_s390_itdb itdb;	/* 0x0600 */
	__u8 reserved700[2304];		/* 0x0700 */
};

struct kvm_vcpu_stat {
	u64 exit_userspace;
	u64 exit_null;
	u64 exit_external_request;
	u64 exit_external_interrupt;
	u64 exit_stop_request;
	u64 exit_validity;
	u64 exit_instruction;
	u64 exit_pei;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 instruction_lctl;
	u64 instruction_lctlg;
	u64 instruction_stctl;
	u64 instruction_stctg;
	u64 exit_program_interruption;
	u64 exit_instr_and_program;
	u64 exit_operation_exception;
	u64 deliver_external_call;
	u64 deliver_emergency_signal;
	u64 deliver_service_signal;
	u64 deliver_virtio_interrupt;
	u64 deliver_stop_signal;
	u64 deliver_prefix_signal;
	u64 deliver_restart_signal;
	u64 deliver_program_int;
	u64 deliver_io_int;
	u64 exit_wait_state;
	u64 instruction_epsw;
	u64 instruction_gs;
	u64 instruction_io_other;
	u64 instruction_lpsw;
	u64 instruction_lpswe;
	u64 instruction_pfmf;
	u64 instruction_ptff;
	u64 instruction_sck;
	u64 instruction_sckpf;
	u64 instruction_stidp;
	u64 instruction_spx;
	u64 instruction_stpx;
	u64 instruction_stap;
	u64 instruction_iske;
	u64 instruction_ri;
	u64 instruction_rrbe;
	u64 instruction_sske;
	u64 instruction_ipte_interlock;
	u64 instruction_stsi;
	u64 instruction_stfl;
	u64 instruction_tb;
	u64 instruction_tpi;
	u64 instruction_tprot;
	u64 instruction_tsch;
	u64 instruction_sie;
	u64 instruction_essa;
	u64 instruction_sthyi;
	u64 instruction_sigp_sense;
	u64 instruction_sigp_sense_running;
	u64 instruction_sigp_external_call;
	u64 instruction_sigp_emergency;
	u64 instruction_sigp_cond_emergency;
	u64 instruction_sigp_start;
	u64 instruction_sigp_stop;
	u64 instruction_sigp_stop_store_status;
	u64 instruction_sigp_store_status;
	u64 instruction_sigp_store_adtl_status;
	u64 instruction_sigp_arch;
	u64 instruction_sigp_prefix;
	u64 instruction_sigp_restart;
	u64 instruction_sigp_init_cpu_reset;
	u64 instruction_sigp_cpu_reset;
	u64 instruction_sigp_unknown;
	u64 diagnose_10;
	u64 diagnose_44;
	u64 diagnose_9c;
	u64 diagnose_258;
	u64 diagnose_308;
	u64 diagnose_500;
	u64 diagnose_other;
};

#define PGM_OPERATION			0x01
#define PGM_PRIVILEGED_OP		0x02
#define PGM_EXECUTE			0x03
#define PGM_PROTECTION			0x04
#define PGM_ADDRESSING			0x05
#define PGM_SPECIFICATION		0x06
#define PGM_DATA			0x07
#define PGM_FIXED_POINT_OVERFLOW	0x08
#define PGM_FIXED_POINT_DIVIDE		0x09
#define PGM_DECIMAL_OVERFLOW		0x0a
#define PGM_DECIMAL_DIVIDE		0x0b
#define PGM_HFP_EXPONENT_OVERFLOW	0x0c
#define PGM_HFP_EXPONENT_UNDERFLOW	0x0d
#define PGM_HFP_SIGNIFICANCE		0x0e
#define PGM_HFP_DIVIDE			0x0f
#define PGM_SEGMENT_TRANSLATION		0x10
#define PGM_PAGE_TRANSLATION		0x11
#define PGM_TRANSLATION_SPEC		0x12
#define PGM_SPECIAL_OPERATION		0x13
#define PGM_OPERAND			0x15
#define PGM_TRACE_TABEL			0x16
#define PGM_VECTOR_PROCESSING		0x1b
#define PGM_SPACE_SWITCH		0x1c
#define PGM_HFP_SQUARE_ROOT		0x1d
#define PGM_PC_TRANSLATION_SPEC		0x1f
#define PGM_AFX_TRANSLATION		0x20
#define PGM_ASX_TRANSLATION		0x21
#define PGM_LX_TRANSLATION		0x22
#define PGM_EX_TRANSLATION		0x23
#define PGM_PRIMARY_AUTHORITY		0x24
#define PGM_SECONDARY_AUTHORITY		0x25
#define PGM_LFX_TRANSLATION		0x26
#define PGM_LSX_TRANSLATION		0x27
#define PGM_ALET_SPECIFICATION		0x28
#define PGM_ALEN_TRANSLATION		0x29
#define PGM_ALE_SEQUENCE		0x2a
#define PGM_ASTE_VALIDITY		0x2b
#define PGM_ASTE_SEQUENCE		0x2c
#define PGM_EXTENDED_AUTHORITY		0x2d
#define PGM_LSTE_SEQUENCE		0x2e
#define PGM_ASTE_INSTANCE		0x2f
#define PGM_STACK_FULL			0x30
#define PGM_STACK_EMPTY			0x31
#define PGM_STACK_SPECIFICATION		0x32
#define PGM_STACK_TYPE			0x33
#define PGM_STACK_OPERATION		0x34
#define PGM_ASCE_TYPE			0x38
#define PGM_REGION_FIRST_TRANS		0x39
#define PGM_REGION_SECOND_TRANS		0x3a
#define PGM_REGION_THIRD_TRANS		0x3b
#define PGM_MONITOR			0x40
#define PGM_PER				0x80
#define PGM_CRYPTO_OPERATION		0x119

/* irq types in order of priority */
enum irq_types {
	IRQ_PEND_MCHK_EX = 0,
	IRQ_PEND_SVC,
	IRQ_PEND_PROG,
	IRQ_PEND_MCHK_REP,
	IRQ_PEND_EXT_IRQ_KEY,
	IRQ_PEND_EXT_MALFUNC,
	IRQ_PEND_EXT_EMERGENCY,
	IRQ_PEND_EXT_EXTERNAL,
	IRQ_PEND_EXT_CLOCK_COMP,
	IRQ_PEND_EXT_CPU_TIMER,
	IRQ_PEND_EXT_TIMING,
	IRQ_PEND_EXT_SERVICE,
	IRQ_PEND_EXT_HOST,
	IRQ_PEND_PFAULT_INIT,
	IRQ_PEND_PFAULT_DONE,
	IRQ_PEND_VIRTIO,
	IRQ_PEND_IO_ISC_0,
	IRQ_PEND_IO_ISC_1,
	IRQ_PEND_IO_ISC_2,
	IRQ_PEND_IO_ISC_3,
	IRQ_PEND_IO_ISC_4,
	IRQ_PEND_IO_ISC_5,
	IRQ_PEND_IO_ISC_6,
	IRQ_PEND_IO_ISC_7,
	IRQ_PEND_SIGP_STOP,
	IRQ_PEND_RESTART,
	IRQ_PEND_SET_PREFIX,
	IRQ_PEND_COUNT
};

/* We have 2M for virtio device descriptor pages. Smallest amount of
 * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
 */
#define KVM_S390_MAX_VIRTIO_IRQS 87381

/*
 * Repressible (non-floating) machine check interrupts
 * subclass bits in MCIC
 */
#define MCHK_EXTD_BIT 58
#define MCHK_DEGR_BIT 56
#define MCHK_WARN_BIT 55
#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
		       (1UL << MCHK_EXTD_BIT) | \
		       (1UL << MCHK_WARN_BIT))

/* Exigent machine check interrupts subclass bits in MCIC */
#define MCHK_SD_BIT 63
#define MCHK_PD_BIT 62
#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))

#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY)    | \
			   (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
			   (1UL << IRQ_PEND_EXT_CPU_TIMER)  | \
			   (1UL << IRQ_PEND_EXT_MALFUNC)    | \
			   (1UL << IRQ_PEND_EXT_EMERGENCY)  | \
			   (1UL << IRQ_PEND_EXT_EXTERNAL)   | \
			   (1UL << IRQ_PEND_EXT_TIMING)     | \
			   (1UL << IRQ_PEND_EXT_HOST)       | \
			   (1UL << IRQ_PEND_EXT_SERVICE)    | \
			   (1UL << IRQ_PEND_VIRTIO)         | \
			   (1UL << IRQ_PEND_PFAULT_INIT)    | \
			   (1UL << IRQ_PEND_PFAULT_DONE))

#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
			  (1UL << IRQ_PEND_IO_ISC_1) | \
			  (1UL << IRQ_PEND_IO_ISC_2) | \
			  (1UL << IRQ_PEND_IO_ISC_3) | \
			  (1UL << IRQ_PEND_IO_ISC_4) | \
			  (1UL << IRQ_PEND_IO_ISC_5) | \
			  (1UL << IRQ_PEND_IO_ISC_6) | \
			  (1UL << IRQ_PEND_IO_ISC_7))

#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
			    (1UL << IRQ_PEND_MCHK_EX))
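
/*
 * These masks are applied to the pending_irqs bitmaps of the local and
 * floating interrupt structures below; for example, a (simplified) check
 * for a pending external interrupt looks like
 *
 *	if (vcpu->arch.local_int.pending_irqs & IRQ_PEND_EXT_MASK)
 *		...
 *
 * The actual delivery logic lives in arch/s390/kvm/interrupt.c.
 */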

struct kvm_s390_interrupt_info {
	struct list_head list;
	u64	type;
	union {
		struct kvm_s390_io_info io;
		struct kvm_s390_ext_info ext;
		struct kvm_s390_pgm_info pgm;
		struct kvm_s390_emerg_info emerg;
		struct kvm_s390_extcall_info extcall;
		struct kvm_s390_prefix_info prefix;
		struct kvm_s390_stop_info stop;
		struct kvm_s390_mchk_info mchk;
	};
};

struct kvm_s390_irq_payload {
	struct kvm_s390_io_info io;
	struct kvm_s390_ext_info ext;
	struct kvm_s390_pgm_info pgm;
	struct kvm_s390_emerg_info emerg;
	struct kvm_s390_extcall_info extcall;
	struct kvm_s390_prefix_info prefix;
	struct kvm_s390_stop_info stop;
	struct kvm_s390_mchk_info mchk;
};

struct kvm_s390_local_interrupt {
	spinlock_t lock;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_irq_payload irq;
	unsigned long pending_irqs;
};

#define FIRQ_LIST_IO_ISC_0 0
#define FIRQ_LIST_IO_ISC_1 1
#define FIRQ_LIST_IO_ISC_2 2
#define FIRQ_LIST_IO_ISC_3 3
#define FIRQ_LIST_IO_ISC_4 4
#define FIRQ_LIST_IO_ISC_5 5
#define FIRQ_LIST_IO_ISC_6 6
#define FIRQ_LIST_IO_ISC_7 7
#define FIRQ_LIST_PFAULT   8
#define FIRQ_LIST_VIRTIO   9
#define FIRQ_LIST_COUNT   10
#define FIRQ_CNTR_IO       0
#define FIRQ_CNTR_SERVICE  1
#define FIRQ_CNTR_VIRTIO   2
#define FIRQ_CNTR_PFAULT   3
#define FIRQ_MAX_COUNT     4

/* mask the AIS mode for a given ISC */
#define AIS_MODE_MASK(isc) (0x80 >> isc)

#define KVM_S390_AIS_MODE_ALL    0
#define KVM_S390_AIS_MODE_SINGLE 1
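
/*
 * AIS_MODE_MASK() picks the per-ISC bit in the simm/nimm masks of
 * struct kvm_s390_float_interrupt below, e.g. AIS_MODE_MASK(0) is 0x80
 * and AIS_MODE_MASK(MAX_ISC) is 0x01.
 */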

struct kvm_s390_float_interrupt {
	unsigned long pending_irqs;
	spinlock_t lock;
	struct list_head lists[FIRQ_LIST_COUNT];
	int counters[FIRQ_MAX_COUNT];
	struct kvm_s390_mchk_info mchk;
	struct kvm_s390_ext_info srv_signal;
	int next_rr_cpu;
	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct mutex ais_lock;
	u8 simm;
	u8 nimm;
};

struct kvm_hw_wp_info_arch {
	unsigned long addr;
	unsigned long phys_addr;
	int len;
	char *old_data;
};

struct kvm_hw_bp_info_arch {
	unsigned long addr;
	int len;
};

/*
 * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
 * Further KVM_GUESTDBG flags which can be used from userspace can be found in
 * arch/s390/include/uapi/asm/kvm.h
 */
#define KVM_GUESTDBG_EXIT_PENDING 0x10000000

#define guestdbg_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
#define guestdbg_sstep_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
#define guestdbg_hw_bp_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
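
/*
 * vcpu->guest_debug holds the control flags that userspace set via the
 * KVM_SET_GUEST_DEBUG ioctl.
 */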

struct kvm_guestdbg_info_arch {
	unsigned long cr0;
	unsigned long cr9;
	unsigned long cr10;
	unsigned long cr11;
	struct kvm_hw_bp_info_arch *hw_bp_info;
	struct kvm_hw_wp_info_arch *hw_wp_info;
	int nr_hw_bp;
	int nr_hw_wp;
	unsigned long last_bp;
};

struct kvm_vcpu_arch {
	struct kvm_s390_sie_block *sie_block;
	/* if vsie is active, currently executed shadow sie control block */
	struct kvm_s390_sie_block *vsie_block;
	unsigned int      host_acrs[NUM_ACRS];
	struct gs_cb      *host_gscb;
	struct fpu	  host_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer    ckc_timer;
	struct kvm_s390_pgm_info pgm;
	struct gmap *gmap;
	/* backup location for the currently enabled gmap when scheduled out */
	struct gmap *enabled_gmap;
	struct kvm_guestdbg_info_arch guestdbg;
	unsigned long pfault_token;
	unsigned long pfault_select;
	unsigned long pfault_compare;
	bool cputm_enabled;
	/*
	 * The seqcount protects updates to cputm_start and sie_block.cputm,
	 * this way we can have non-blocking reads with consistent values.
	 * Only the owning VCPU thread (vcpu->cpu) is allowed to change these
	 * values and to start/stop/enable/disable cpu timer accounting.
	 */
	seqcount_t cputm_seqcount;
	__u64 cputm_start;
	bool gs_enabled;
};
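
/*
 * Simplified sketch of a lockless read of the guest cpu timer guarded by
 * cputm_seqcount above (see kvm_s390_get_cpu_timer() in
 * arch/s390/kvm/kvm-s390.c for the real version):
 *
 *	do {
 *		seq = read_seqcount_begin(&vcpu->arch.cputm_seqcount);
 *		value = vcpu->arch.sie_block->cputm;
 *		if (vcpu->arch.cputm_start != 0)
 *			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
 *	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq));
 */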

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_arch_memory_slot {
};

struct s390_map_info {
	struct list_head list;
	__u64 guest_addr;
	__u64 addr;
	struct page *page;
};

struct s390_io_adapter {
	unsigned int id;
	int isc;
	bool maskable;
	bool masked;
	bool swap;
	bool suppressible;
	struct rw_semaphore maps_lock;
	struct list_head maps;
	atomic_t nr_maps;
};

#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256

/* maximum size of facilities and facility mask is 2k bytes */
#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
#define S390_ARCH_FAC_LIST_SIZE_U64 \
	(S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
#define S390_ARCH_FAC_MASK_SIZE_U64 \
	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))

struct kvm_s390_cpu_model {
	/* facility mask supported by kvm & hosting machine */
	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
	/* facility list requested by guest (in dma page) */
	__u64 *fac_list;
	u64 cpuid;
	unsigned short ibc;
};

struct kvm_s390_crypto {
	struct kvm_s390_crypto_cb *crycb;
	__u32 crycbd;
	__u8 aes_kw;
	__u8 dea_kw;
};

#define APCB0_MASK_SIZE 1
struct kvm_s390_apcb0 {
	__u64 apm[APCB0_MASK_SIZE];		/* 0x0000 */
	__u64 aqm[APCB0_MASK_SIZE];		/* 0x0008 */
	__u64 adm[APCB0_MASK_SIZE];		/* 0x0010 */
	__u64 reserved18;			/* 0x0018 */
};

#define APCB1_MASK_SIZE 4
struct kvm_s390_apcb1 {
	__u64 apm[APCB1_MASK_SIZE];		/* 0x0000 */
	__u64 aqm[APCB1_MASK_SIZE];		/* 0x0020 */
	__u64 adm[APCB1_MASK_SIZE];		/* 0x0040 */
	__u64 reserved60[4];			/* 0x0060 */
};

struct kvm_s390_crypto_cb {
	struct kvm_s390_apcb0 apcb0;		/* 0x0000 */
	__u8   reserved20[0x0048 - 0x0020];	/* 0x0020 */
	__u8   dea_wrapping_key_mask[24];	/* 0x0048 */
	__u8   aes_wrapping_key_mask[32];	/* 0x0060 */
	struct kvm_s390_apcb1 apcb1;		/* 0x0080 */
};

/*
 * sie_page2 has to be allocated as DMA because fac_list and crycb need
 * 31bit addresses in the sie control block.
 */
struct sie_page2 {
	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
	u8 reserved900[0x1000 - 0x900];			/* 0x0900 */
};

struct kvm_s390_vsie {
	struct mutex mutex;
	struct radix_tree_root addr_to_page;
	int page_count;
	int next;
	struct page *pages[KVM_MAX_VCPUS];
};

struct kvm_s390_migration_state {
	unsigned long bitmap_size;	/* in bits (number of guest pages) */
	atomic64_t dirty_pages;		/* number of dirty pages */
	unsigned long *pgste_bitmap;
};

struct kvm_arch {
	void *sca;
	int use_esca;
	rwlock_t sca_lock;
	debug_info_t *dbf;
	struct kvm_s390_float_interrupt float_int;
	struct kvm_device *flic;
	struct gmap *gmap;
	unsigned long mem_limit;
	int css_support;
	int use_irqchip;
	int use_cmma;
	int user_cpu_state_ctrl;
	int user_sigp;
	int user_stsi;
	int user_instr0;
	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
	wait_queue_head_t ipte_wq;
	int ipte_lock_count;
	struct mutex ipte_mutex;
	spinlock_t start_stop_lock;
	struct sie_page2 *sie_page2;
	struct kvm_s390_cpu_model model;
	struct kvm_s390_crypto crypto;
	struct kvm_s390_vsie vsie;
	u8 epdx;
	u64 epoch;
	struct kvm_s390_migration_state *migration_state;
	/* subset of available cpu features enabled by user space */
	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
};

#define KVM_HVA_ERR_BAD		(-1UL)
#define KVM_HVA_ERR_RO_BAD	(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

#define ASYNC_PF_PER_VCPU	64
struct kvm_arch_async_pf {
	unsigned long pfault_token;
};

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
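
/*
 * sie64a() below is implemented in arch/s390/kernel/entry.S: it enters SIE
 * with the given control block and the guest gpr save area as its two
 * arguments; sie_exit is a label in entry.S marking the SIE exit path.
 */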

extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);

#endif