/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kernel virtual machines on s390
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */


#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H

#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/seqlock.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>
#include <asm/guarded_storage.h>

#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS 255
#define KVM_USER_MEM_SLOTS 32

/*
 * These seem to be used for allocating ->chip in the routing table,
 * which we don't use. 4096 is an out-of-thin-air value. If we need
 * to look at ->chip later on, we'll need to revisit this.
 */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
#define KVM_HALT_POLL_NS_DEFAULT 80000

/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS	KVM_ARCH_REQ(0)
#define KVM_REQ_DISABLE_IBS	KVM_ARCH_REQ(1)
#define KVM_REQ_ICPT_OPEREXC	KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION  KVM_ARCH_REQ(4)

#define SIGP_CTRL_C		0x80
#define SIGP_CTRL_SCN_MASK	0x3f

union bsca_sigp_ctrl {
	__u8 value;
	struct {
		__u8 c : 1;
		__u8 r : 1;
		__u8 scn : 6;
	};
};

union esca_sigp_ctrl {
	__u16 value;
	struct {
		__u8 c : 1;
		__u8 reserved: 7;
		__u8 scn;
	};
};
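/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * on big-endian s390 the bit-fields above line up with the SIGP_CTRL_*
 * masks, so the same information can be read either way, e.g.:
 *
 *      union bsca_sigp_ctrl ctrl = { .value = sigp_ctrl_byte };
 *
 *      if (ctrl.value & SIGP_CTRL_C)                   // equivalent to ctrl.c
 *              scn = ctrl.value & SIGP_CTRL_SCN_MASK;  // equivalent to ctrl.scn
 *
 * "sigp_ctrl_byte" and "scn" are hypothetical locals used only for this
 * example.
 */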

struct esca_entry {
	union esca_sigp_ctrl sigp_ctrl;
	__u16   reserved1[3];
	__u64   sda;
	__u64   reserved2[6];
};

struct bsca_entry {
	__u8	reserved0;
	union bsca_sigp_ctrl	sigp_ctrl;
	__u16	reserved[3];
	__u64	sda;
	__u64	reserved2[2];
};

union ipte_control {
	unsigned long val;
	struct {
		unsigned long k  : 1;
		unsigned long kh : 31;
		unsigned long kg : 32;
	};
};

struct bsca_block {
	union ipte_control ipte_control;
	__u64	reserved[5];
	__u64	mcn;
	__u64	reserved2;
	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
};

struct esca_block {
	union ipte_control ipte_control;
	__u64   reserved1[7];
	__u64   mcn[4];
	__u64   reserved2[20];
	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
};

/*
 * This struct is used to store some machine check info from lowcore
 * for machine checks that happen while the guest is running.
 * This info in host's lowcore might be overwritten by a second machine
 * check from host when host is in the machine check's high-level handling.
 * The size is 24 bytes.
 */
struct mcck_volatile_info {
	__u64 mcic;
	__u64 failing_storage_address;
	__u32 ext_damage_code;
	__u32 reserved;
};
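/*
 * Illustrative sketch (editor's addition): the snapshot described above is
 * taken roughly like this, while the lowcore contents are still valid:
 *
 *      struct mcck_volatile_info *info = &sie_page->mcck_info;
 *
 *      info->mcic = S390_lowcore.mcck_interruption_code;
 *      info->ext_damage_code = S390_lowcore.external_damage_code;
 *      info->failing_storage_address = S390_lowcore.failing_storage_address;
 *
 * "sie_page" is a hypothetical pointer to the struct sie_page defined later
 * in this header; see arch/s390/kernel/nmi.c for the real backup code.
 */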

#define CPUSTAT_STOPPED    0x80000000
#define CPUSTAT_WAIT       0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT   0x04000000
#define CPUSTAT_IO_INT     0x02000000
#define CPUSTAT_EXT_INT    0x01000000
#define CPUSTAT_RUNNING    0x00800000
#define CPUSTAT_RETAINED   0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB    0x00010000
#define CPUSTAT_RRF        0x00008000
#define CPUSTAT_SLSV       0x00004000
#define CPUSTAT_SLSR       0x00002000
#define CPUSTAT_ZARCH      0x00000800
#define CPUSTAT_MCDS       0x00000100
#define CPUSTAT_KSS        0x00000200
#define CPUSTAT_SM         0x00000080
#define CPUSTAT_IBS        0x00000040
#define CPUSTAT_GED2       0x00000010
#define CPUSTAT_G          0x00000008
#define CPUSTAT_GED        0x00000004
#define CPUSTAT_J          0x00000002
#define CPUSTAT_P          0x00000001
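/*
 * Illustrative sketch (editor's addition): the CPUSTAT_* bits above live in
 * the atomic cpuflags word at offset 0 of the SIE control block below and
 * are typically manipulated with the atomic mask helpers, e.g.:
 *
 *      atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 *      atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 *      stopped = atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
 */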

struct kvm_s390_sie_block {
	atomic_t cpuflags;		/* 0x0000 */
	__u32 : 1;			/* 0x0004 */
	__u32 prefix : 18;
	__u32 : 1;
	__u32 ibc : 12;
	__u8	reserved08[4];		/* 0x0008 */
#define PROG_IN_SIE (1<<0)
	__u32	prog0c;			/* 0x000c */
	__u8	reserved10[16];		/* 0x0010 */
#define PROG_BLOCK_SIE	(1<<0)
#define PROG_REQUEST	(1<<1)
	atomic_t prog20;		/* 0x0020 */
	__u8	reserved24[4];		/* 0x0024 */
	__u64	cputm;			/* 0x0028 */
	__u64	ckc;			/* 0x0030 */
	__u64	epoch;			/* 0x0038 */
	__u32	svcc;			/* 0x0040 */
#define LCTL_CR0	0x8000
#define LCTL_CR6	0x0200
#define LCTL_CR9	0x0040
#define LCTL_CR10	0x0020
#define LCTL_CR11	0x0010
#define LCTL_CR14	0x0002
	__u16   lctl;			/* 0x0044 */
	__s16	icpua;			/* 0x0046 */
#define ICTL_OPEREXC	0x80000000
#define ICTL_PINT	0x20000000
#define ICTL_LPSW	0x00400000
#define ICTL_STCTL	0x00040000
#define ICTL_ISKE	0x00004000
#define ICTL_SSKE	0x00002000
#define ICTL_RRBE	0x00001000
#define ICTL_TPROT	0x00000200
	__u32	ictl;			/* 0x0048 */
#define ECA_CEI		0x80000000
#define ECA_IB		0x40000000
#define ECA_SIGPI	0x10000000
#define ECA_MVPGI	0x01000000
#define ECA_AIV		0x00200000
#define ECA_VX		0x00020000
#define ECA_PROTEXCI	0x00002000
#define ECA_SII		0x00000001
	__u32	eca;			/* 0x004c */
#define ICPT_INST	0x04
#define ICPT_PROGI	0x08
#define ICPT_INSTPROGI	0x0C
#define ICPT_EXTREQ	0x10
#define ICPT_EXTINT	0x14
#define ICPT_IOREQ	0x18
#define ICPT_WAIT	0x1c
#define ICPT_VALIDITY	0x20
#define ICPT_STOP	0x28
#define ICPT_OPEREXC	0x2C
#define ICPT_PARTEXEC	0x38
#define ICPT_IOINST	0x40
#define ICPT_KSS	0x5c
	__u8	icptcode;		/* 0x0050 */
	__u8	icptstatus;		/* 0x0051 */
	__u16	ihcpu;			/* 0x0052 */
	__u8	reserved54[2];		/* 0x0054 */
	__u16	ipa;			/* 0x0056 */
	__u32	ipb;			/* 0x0058 */
	__u32	scaoh;			/* 0x005c */
#define FPF_BPBC 	0x20
	__u8	fpf;			/* 0x0060 */
#define ECB_GS		0x40
#define ECB_TE		0x10
#define ECB_SRSI	0x04
#define ECB_HOSTPROTINT	0x02
	__u8	ecb;			/* 0x0061 */
#define ECB2_CMMA	0x80
#define ECB2_IEP	0x20
#define ECB2_PFMFI	0x08
#define ECB2_ESCA	0x04
	__u8    ecb2;                   /* 0x0062 */
#define ECB3_DEA 0x08
#define ECB3_AES 0x04
#define ECB3_RI  0x01
	__u8    ecb3;			/* 0x0063 */
	__u32	scaol;			/* 0x0064 */
	__u8	reserved68;		/* 0x0068 */
	__u8    epdx;			/* 0x0069 */
	__u8    reserved6a[2];		/* 0x006a */
	__u32	todpr;			/* 0x006c */
#define GISA_FORMAT1 0x00000001
	__u32	gd;			/* 0x0070 */
	__u8	reserved74[12];		/* 0x0074 */
	__u64	mso;			/* 0x0080 */
	__u64	msl;			/* 0x0088 */
	psw_t	gpsw;			/* 0x0090 */
	__u64	gg14;			/* 0x00a0 */
	__u64	gg15;			/* 0x00a8 */
	__u8	reservedb0[20];		/* 0x00b0 */
	__u16	extcpuaddr;		/* 0x00c4 */
	__u16	eic;			/* 0x00c6 */
	__u32	reservedc8;		/* 0x00c8 */
	__u16	pgmilc;			/* 0x00cc */
	__u16	iprcc;			/* 0x00ce */
	__u32	dxc;			/* 0x00d0 */
	__u16	mcn;			/* 0x00d4 */
	__u8	perc;			/* 0x00d6 */
	__u8	peratmid;		/* 0x00d7 */
	__u64	peraddr;		/* 0x00d8 */
	__u8	eai;			/* 0x00e0 */
	__u8	peraid;			/* 0x00e1 */
	__u8	oai;			/* 0x00e2 */
	__u8	armid;			/* 0x00e3 */
	__u8	reservede4[4];		/* 0x00e4 */
	__u64	tecmc;			/* 0x00e8 */
	__u8	reservedf0[12];		/* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
	__u32	crycbd;			/* 0x00fc */
	__u64	gcr[16];		/* 0x0100 */
	__u64	gbea;			/* 0x0180 */
	__u8    reserved188[8];		/* 0x0188 */
	__u64   sdnxo;			/* 0x0190 */
	__u8    reserved198[8];		/* 0x0198 */
	__u32	fac;			/* 0x01a0 */
	__u8	reserved1a4[20];	/* 0x01a4 */
	__u64	cbrlo;			/* 0x01b8 */
	__u8	reserved1c0[8];		/* 0x01c0 */
#define ECD_HOSTREGMGMT	0x20000000
#define ECD_MEF		0x08000000
	__u32	ecd;			/* 0x01c8 */
	__u8	reserved1cc[18];	/* 0x01cc */
	__u64	pp;			/* 0x01de */
	__u8	reserved1e6[2];		/* 0x01e6 */
	__u64	itdba;			/* 0x01e8 */
	__u64   riccbd;			/* 0x01f0 */
	__u64	gvrd;			/* 0x01f8 */
} __attribute__((packed));

struct kvm_s390_itdb {
	__u8	data[256];
};

struct sie_page {
	struct kvm_s390_sie_block sie_block;
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	__u8 reserved218[1000];		/* 0x0218 */
	struct kvm_s390_itdb itdb;	/* 0x0600 */
	__u8 reserved700[2304];		/* 0x0700 */
};

struct kvm_vcpu_stat {
	u64 exit_userspace;
	u64 exit_null;
	u64 exit_external_request;
	u64 exit_io_request;
	u64 exit_external_interrupt;
	u64 exit_stop_request;
	u64 exit_validity;
	u64 exit_instruction;
	u64 exit_pei;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 instruction_lctl;
	u64 instruction_lctlg;
	u64 instruction_stctl;
	u64 instruction_stctg;
	u64 exit_program_interruption;
	u64 exit_instr_and_program;
	u64 exit_operation_exception;
	u64 deliver_external_call;
	u64 deliver_emergency_signal;
	u64 deliver_service_signal;
	u64 deliver_virtio_interrupt;
	u64 deliver_stop_signal;
	u64 deliver_prefix_signal;
	u64 deliver_restart_signal;
	u64 deliver_program_int;
	u64 deliver_io_int;
	u64 exit_wait_state;
	u64 instruction_epsw;
	u64 instruction_gs;
	u64 instruction_io_other;
	u64 instruction_lpsw;
	u64 instruction_lpswe;
	u64 instruction_pfmf;
	u64 instruction_ptff;
	u64 instruction_sck;
	u64 instruction_sckpf;
	u64 instruction_stidp;
	u64 instruction_spx;
	u64 instruction_stpx;
	u64 instruction_stap;
	u64 instruction_iske;
	u64 instruction_ri;
	u64 instruction_rrbe;
	u64 instruction_sske;
	u64 instruction_ipte_interlock;
	u64 instruction_stsi;
	u64 instruction_stfl;
	u64 instruction_tb;
	u64 instruction_tpi;
	u64 instruction_tprot;
	u64 instruction_tsch;
	u64 instruction_sie;
	u64 instruction_essa;
	u64 instruction_sthyi;
	u64 instruction_sigp_sense;
	u64 instruction_sigp_sense_running;
	u64 instruction_sigp_external_call;
	u64 instruction_sigp_emergency;
	u64 instruction_sigp_cond_emergency;
	u64 instruction_sigp_start;
	u64 instruction_sigp_stop;
	u64 instruction_sigp_stop_store_status;
	u64 instruction_sigp_store_status;
	u64 instruction_sigp_store_adtl_status;
	u64 instruction_sigp_arch;
	u64 instruction_sigp_prefix;
	u64 instruction_sigp_restart;
	u64 instruction_sigp_init_cpu_reset;
	u64 instruction_sigp_cpu_reset;
	u64 instruction_sigp_unknown;
	u64 diagnose_10;
	u64 diagnose_44;
	u64 diagnose_9c;
	u64 diagnose_258;
	u64 diagnose_308;
	u64 diagnose_500;
	u64 diagnose_other;
};

#define PGM_OPERATION			0x01
#define PGM_PRIVILEGED_OP		0x02
#define PGM_EXECUTE			0x03
#define PGM_PROTECTION			0x04
#define PGM_ADDRESSING			0x05
#define PGM_SPECIFICATION		0x06
#define PGM_DATA			0x07
#define PGM_FIXED_POINT_OVERFLOW	0x08
#define PGM_FIXED_POINT_DIVIDE		0x09
#define PGM_DECIMAL_OVERFLOW		0x0a
#define PGM_DECIMAL_DIVIDE		0x0b
#define PGM_HFP_EXPONENT_OVERFLOW	0x0c
#define PGM_HFP_EXPONENT_UNDERFLOW	0x0d
#define PGM_HFP_SIGNIFICANCE		0x0e
#define PGM_HFP_DIVIDE			0x0f
#define PGM_SEGMENT_TRANSLATION		0x10
#define PGM_PAGE_TRANSLATION		0x11
#define PGM_TRANSLATION_SPEC		0x12
#define PGM_SPECIAL_OPERATION		0x13
#define PGM_OPERAND			0x15
#define PGM_TRACE_TABEL			0x16
#define PGM_VECTOR_PROCESSING		0x1b
#define PGM_SPACE_SWITCH		0x1c
#define PGM_HFP_SQUARE_ROOT		0x1d
#define PGM_PC_TRANSLATION_SPEC		0x1f
#define PGM_AFX_TRANSLATION		0x20
#define PGM_ASX_TRANSLATION		0x21
#define PGM_LX_TRANSLATION		0x22
#define PGM_EX_TRANSLATION		0x23
#define PGM_PRIMARY_AUTHORITY		0x24
#define PGM_SECONDARY_AUTHORITY		0x25
#define PGM_LFX_TRANSLATION		0x26
#define PGM_LSX_TRANSLATION		0x27
#define PGM_ALET_SPECIFICATION		0x28
#define PGM_ALEN_TRANSLATION		0x29
#define PGM_ALE_SEQUENCE		0x2a
#define PGM_ASTE_VALIDITY		0x2b
#define PGM_ASTE_SEQUENCE		0x2c
#define PGM_EXTENDED_AUTHORITY		0x2d
#define PGM_LSTE_SEQUENCE		0x2e
#define PGM_ASTE_INSTANCE		0x2f
#define PGM_STACK_FULL			0x30
#define PGM_STACK_EMPTY			0x31
#define PGM_STACK_SPECIFICATION		0x32
#define PGM_STACK_TYPE			0x33
#define PGM_STACK_OPERATION		0x34
#define PGM_ASCE_TYPE			0x38
#define PGM_REGION_FIRST_TRANS		0x39
#define PGM_REGION_SECOND_TRANS		0x3a
#define PGM_REGION_THIRD_TRANS		0x3b
#define PGM_MONITOR			0x40
#define PGM_PER				0x80
#define PGM_CRYPTO_OPERATION		0x119

/* irq types in ascending order of priorities */
enum irq_types {
	IRQ_PEND_SET_PREFIX = 0,
	IRQ_PEND_RESTART,
	IRQ_PEND_SIGP_STOP,
	IRQ_PEND_IO_ISC_7,
	IRQ_PEND_IO_ISC_6,
	IRQ_PEND_IO_ISC_5,
	IRQ_PEND_IO_ISC_4,
	IRQ_PEND_IO_ISC_3,
	IRQ_PEND_IO_ISC_2,
	IRQ_PEND_IO_ISC_1,
	IRQ_PEND_IO_ISC_0,
	IRQ_PEND_VIRTIO,
	IRQ_PEND_PFAULT_DONE,
	IRQ_PEND_PFAULT_INIT,
	IRQ_PEND_EXT_HOST,
	IRQ_PEND_EXT_SERVICE,
	IRQ_PEND_EXT_TIMING,
	IRQ_PEND_EXT_CPU_TIMER,
	IRQ_PEND_EXT_CLOCK_COMP,
	IRQ_PEND_EXT_EXTERNAL,
	IRQ_PEND_EXT_EMERGENCY,
	IRQ_PEND_EXT_MALFUNC,
	IRQ_PEND_EXT_IRQ_KEY,
	IRQ_PEND_MCHK_REP,
	IRQ_PEND_PROG,
	IRQ_PEND_SVC,
	IRQ_PEND_MCHK_EX,
	IRQ_PEND_COUNT
};

/* We have 2M for virtio device descriptor pages. Smallest amount of
 * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
 */
#define KVM_S390_MAX_VIRTIO_IRQS 87381
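/*
 * Editor's note, spelling out the arithmetic above: 2M is 2048 * 1024 =
 * 2097152 bytes, and 2097152 / 24 = 87381 (rounded down), hence the limit.
 */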

/*
 * Repressible (non-floating) machine check interrupts
 * subclass bits in MCIC
 */
#define MCHK_EXTD_BIT 58
#define MCHK_DEGR_BIT 56
#define MCHK_WARN_BIT 55
#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
		       (1UL << MCHK_EXTD_BIT) | \
		       (1UL << MCHK_WARN_BIT))

/* Exigent machine check interrupts subclass bits in MCIC */
#define MCHK_SD_BIT 63
#define MCHK_PD_BIT 62
#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))

#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY)    | \
			   (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
			   (1UL << IRQ_PEND_EXT_CPU_TIMER)  | \
			   (1UL << IRQ_PEND_EXT_MALFUNC)    | \
			   (1UL << IRQ_PEND_EXT_EMERGENCY)  | \
			   (1UL << IRQ_PEND_EXT_EXTERNAL)   | \
			   (1UL << IRQ_PEND_EXT_TIMING)     | \
			   (1UL << IRQ_PEND_EXT_HOST)       | \
			   (1UL << IRQ_PEND_EXT_SERVICE)    | \
			   (1UL << IRQ_PEND_VIRTIO)         | \
			   (1UL << IRQ_PEND_PFAULT_INIT)    | \
			   (1UL << IRQ_PEND_PFAULT_DONE))

#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
			  (1UL << IRQ_PEND_IO_ISC_1) | \
			  (1UL << IRQ_PEND_IO_ISC_2) | \
			  (1UL << IRQ_PEND_IO_ISC_3) | \
			  (1UL << IRQ_PEND_IO_ISC_4) | \
			  (1UL << IRQ_PEND_IO_ISC_5) | \
			  (1UL << IRQ_PEND_IO_ISC_6) | \
			  (1UL << IRQ_PEND_IO_ISC_7))

#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
			    (1UL << IRQ_PEND_MCHK_EX))
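/*
 * Illustrative sketch (editor's addition): the masks above group the
 * pending_irqs bits by class so callers can test a whole class at once,
 * roughly:
 *
 *      if (vcpu->arch.local_int.pending_irqs & IRQ_PEND_EXT_MASK)
 *              ...     // some external interrupt is pending for this vcpu
 *      if (vcpu->kvm->arch.float_int.pending_irqs & IRQ_PEND_IO_MASK)
 *              ...     // some floating I/O interrupt is pending
 */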

struct kvm_s390_interrupt_info {
	struct list_head list;
	u64	type;
	union {
		struct kvm_s390_io_info io;
		struct kvm_s390_ext_info ext;
		struct kvm_s390_pgm_info pgm;
		struct kvm_s390_emerg_info emerg;
		struct kvm_s390_extcall_info extcall;
		struct kvm_s390_prefix_info prefix;
		struct kvm_s390_stop_info stop;
		struct kvm_s390_mchk_info mchk;
	};
};

struct kvm_s390_irq_payload {
	struct kvm_s390_io_info io;
	struct kvm_s390_ext_info ext;
	struct kvm_s390_pgm_info pgm;
	struct kvm_s390_emerg_info emerg;
	struct kvm_s390_extcall_info extcall;
	struct kvm_s390_prefix_info prefix;
	struct kvm_s390_stop_info stop;
	struct kvm_s390_mchk_info mchk;
};

struct kvm_s390_local_interrupt {
	spinlock_t lock;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_irq_payload irq;
	unsigned long pending_irqs;
};

#define FIRQ_LIST_IO_ISC_0 0
#define FIRQ_LIST_IO_ISC_1 1
#define FIRQ_LIST_IO_ISC_2 2
#define FIRQ_LIST_IO_ISC_3 3
#define FIRQ_LIST_IO_ISC_4 4
#define FIRQ_LIST_IO_ISC_5 5
#define FIRQ_LIST_IO_ISC_6 6
#define FIRQ_LIST_IO_ISC_7 7
#define FIRQ_LIST_PFAULT   8
#define FIRQ_LIST_VIRTIO   9
#define FIRQ_LIST_COUNT   10
#define FIRQ_CNTR_IO       0
#define FIRQ_CNTR_SERVICE  1
#define FIRQ_CNTR_VIRTIO   2
#define FIRQ_CNTR_PFAULT   3
#define FIRQ_MAX_COUNT     4

/* mask the AIS mode for a given ISC */
#define AIS_MODE_MASK(isc) (0x80 >> isc)

#define KVM_S390_AIS_MODE_ALL    0
#define KVM_S390_AIS_MODE_SINGLE 1

struct kvm_s390_float_interrupt {
	unsigned long pending_irqs;
	spinlock_t lock;
	struct list_head lists[FIRQ_LIST_COUNT];
	int counters[FIRQ_MAX_COUNT];
	struct kvm_s390_mchk_info mchk;
	struct kvm_s390_ext_info srv_signal;
	int next_rr_cpu;
	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct mutex ais_lock;
	u8 simm;
	u8 nimm;
};

struct kvm_hw_wp_info_arch {
	unsigned long addr;
	unsigned long phys_addr;
	int len;
	char *old_data;
};

struct kvm_hw_bp_info_arch {
	unsigned long addr;
	int len;
};

/*
 * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
 * Further KVM_GUESTDBG flags which can be used from userspace can be found in
 * arch/s390/include/uapi/asm/kvm.h
 */
#define KVM_GUESTDBG_EXIT_PENDING 0x10000000

#define guestdbg_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
#define guestdbg_sstep_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
#define guestdbg_hw_bp_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
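/*
 * Illustrative sketch (editor's addition): the helpers above are simple
 * predicates on vcpu->guest_debug, e.g.:
 *
 *      if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
 *              ...     // userspace asked for single-stepping via
 *                      // KVM_SET_GUEST_DEBUG
 */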

struct kvm_guestdbg_info_arch {
	unsigned long cr0;
	unsigned long cr9;
	unsigned long cr10;
	unsigned long cr11;
	struct kvm_hw_bp_info_arch *hw_bp_info;
	struct kvm_hw_wp_info_arch *hw_wp_info;
	int nr_hw_bp;
	int nr_hw_wp;
	unsigned long last_bp;
};

struct kvm_vcpu_arch {
	struct kvm_s390_sie_block *sie_block;
	/* if vsie is active, currently executed shadow sie control block */
	struct kvm_s390_sie_block *vsie_block;
	unsigned int      host_acrs[NUM_ACRS];
	struct gs_cb      *host_gscb;
	struct fpu	  host_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer    ckc_timer;
	struct kvm_s390_pgm_info pgm;
	struct gmap *gmap;
	/* backup location for the currently enabled gmap when scheduled out */
	struct gmap *enabled_gmap;
	struct kvm_guestdbg_info_arch guestdbg;
	unsigned long pfault_token;
	unsigned long pfault_select;
	unsigned long pfault_compare;
	bool cputm_enabled;
	/*
	 * The seqcount protects updates to cputm_start and sie_block.cputm,
	 * this way we can have non-blocking reads with consistent values.
	 * Only the owning VCPU thread (vcpu->cpu) is allowed to change these
	 * values and to start/stop/enable/disable cpu timer accounting.
	 */
	seqcount_t cputm_seqcount;
	__u64 cputm_start;
	bool gs_enabled;
};
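/*
 * Illustrative sketch (editor's addition, not the actual implementation):
 * a reader on another thread would sample the CPU timer under the seqcount
 * described in the structure above and retry on a racing update, roughly:
 *
 *      unsigned int seq;
 *      __u64 value;
 *
 *      do {
 *              seq = read_seqcount_begin(&vcpu->arch.cputm_seqcount);
 *              value = vcpu->arch.sie_block->cputm;
 *              if (vcpu->arch.cputm_start != 0)        // accounting running?
 *                      value -= get_tod_clock() - vcpu->arch.cputm_start;
 *      } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq));
 */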

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_arch_memory_slot {
};

struct s390_map_info {
	struct list_head list;
	__u64 guest_addr;
	__u64 addr;
	struct page *page;
};

struct s390_io_adapter {
	unsigned int id;
	int isc;
	bool maskable;
	bool masked;
	bool swap;
	bool suppressible;
	struct rw_semaphore maps_lock;
	struct list_head maps;
	atomic_t nr_maps;
};

#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256

/* maximum size of facilities and facility mask is 2k bytes */
#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
#define S390_ARCH_FAC_LIST_SIZE_U64 \
	(S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
#define S390_ARCH_FAC_MASK_SIZE_U64 \
	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))

struct kvm_s390_cpu_model {
	/* facility mask supported by kvm & hosting machine */
	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
	/* facility list requested by guest (in dma page) */
	__u64 *fac_list;
	u64 cpuid;
	unsigned short ibc;
};

struct kvm_s390_crypto {
	struct kvm_s390_crypto_cb *crycb;
	__u32 crycbd;
	__u8 aes_kw;
	__u8 dea_kw;
};

#define APCB0_MASK_SIZE 1
struct kvm_s390_apcb0 {
	__u64 apm[APCB0_MASK_SIZE];		/* 0x0000 */
	__u64 aqm[APCB0_MASK_SIZE];		/* 0x0008 */
	__u64 adm[APCB0_MASK_SIZE];		/* 0x0010 */
	__u64 reserved18;			/* 0x0018 */
};

#define APCB1_MASK_SIZE 4
struct kvm_s390_apcb1 {
	__u64 apm[APCB1_MASK_SIZE];		/* 0x0000 */
	__u64 aqm[APCB1_MASK_SIZE];		/* 0x0020 */
	__u64 adm[APCB1_MASK_SIZE];		/* 0x0040 */
	__u64 reserved60[4];			/* 0x0060 */
};

struct kvm_s390_crypto_cb {
	struct kvm_s390_apcb0 apcb0;		/* 0x0000 */
	__u8   reserved20[0x0048 - 0x0020];	/* 0x0020 */
	__u8   dea_wrapping_key_mask[24];	/* 0x0048 */
	__u8   aes_wrapping_key_mask[32];	/* 0x0060 */
	struct kvm_s390_apcb1 apcb1;		/* 0x0080 */
};

struct kvm_s390_gisa {
	union {
		struct { /* common to all formats */
			u32 next_alert;
			u8  ipm;
			u8  reserved01[2];
			u8  iam;
		};
		struct { /* format 0 */
			u32 next_alert;
			u8  ipm;
			u8  reserved01;
			u8  : 6;
			u8  g : 1;
			u8  c : 1;
			u8  iam;
			u8  reserved02[4];
			u32 airq_count;
		} g0;
		struct { /* format 1 */
			u32 next_alert;
			u8  ipm;
			u8  simm;
			u8  nimm;
			u8  iam;
			u8  aism[8];
			u8  : 6;
			u8  g : 1;
			u8  c : 1;
			u8  reserved03[11];
			u32 airq_count;
		} g1;
	};
};

/*
 * sie_page2 has to be allocated as DMA because fac_list, crycb and
 * gisa need 31bit addresses in the sie control block.
 */
struct sie_page2 {
	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
	struct kvm_s390_gisa gisa;			/* 0x0900 */
	u8 reserved920[0x1000 - 0x920];			/* 0x0920 */
};
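/*
 * Illustrative sketch (editor's addition): because of the 31-bit address
 * requirement noted above, the backing page is expected to come from the
 * DMA zone, roughly:
 *
 *      kvm->arch.sie_page2 =
 *              (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *      if (!kvm->arch.sie_page2)
 *              return -ENOMEM;
 *      kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
 */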

struct kvm_s390_vsie {
	struct mutex mutex;
	struct radix_tree_root addr_to_page;
	int page_count;
	int next;
	struct page *pages[KVM_MAX_VCPUS];
};

struct kvm_s390_migration_state {
	unsigned long bitmap_size;	/* in bits (number of guest pages) */
	atomic64_t dirty_pages;		/* number of dirty pages */
	unsigned long *pgste_bitmap;
};

struct kvm_arch {
	void *sca;
	int use_esca;
	rwlock_t sca_lock;
	debug_info_t *dbf;
	struct kvm_s390_float_interrupt float_int;
	struct kvm_device *flic;
	struct gmap *gmap;
	unsigned long mem_limit;
	int css_support;
	int use_irqchip;
	int use_cmma;
	int use_pfmfi;
	int user_cpu_state_ctrl;
	int user_sigp;
	int user_stsi;
	int user_instr0;
	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
	wait_queue_head_t ipte_wq;
	int ipte_lock_count;
	struct mutex ipte_mutex;
	spinlock_t start_stop_lock;
	struct sie_page2 *sie_page2;
	struct kvm_s390_cpu_model model;
	struct kvm_s390_crypto crypto;
	struct kvm_s390_vsie vsie;
	u8 epdx;
	u64 epoch;
	struct kvm_s390_migration_state *migration_state;
	/* subset of available cpu features enabled by user space */
	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	struct kvm_s390_gisa *gisa;
};

#define KVM_HVA_ERR_BAD		(-1UL)
#define KVM_HVA_ERR_RO_BAD	(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

#define ASYNC_PF_PER_VCPU	64
struct kvm_arch_async_pf {
	unsigned long pfault_token;
};

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);

extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);

#endif