/*
 * definition for kernel virtual machines on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */


#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H

#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>

#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
#define KVM_USER_MEM_SLOTS 32

/*
 * These seem to be used for allocating ->chip in the routing table,
 * which we don't use. 4096 is an out-of-thin-air value. If we need
 * to look at ->chip later on, we'll need to revisit this.
 */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
#define KVM_HALT_POLL_NS_DEFAULT 0

#define SIGP_CTRL_C		0x80
#define SIGP_CTRL_SCN_MASK	0x3f

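/*
 * Per-cpu entries of the basic (bsca) and extended (esca) system control
 * area: sigp_ctrl tracks a pending SIGP external call and its source cpu
 * number, sda holds the address of that cpu's SIE control block.
 */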
union bsca_sigp_ctrl {
	__u8 value;
	struct {
		__u8 c : 1;
		__u8 r : 1;
		__u8 scn : 6;
	};
} __packed;

union esca_sigp_ctrl {
	__u16 value;
	struct {
		__u8 c : 1;
		__u8 reserved: 7;
		__u8 scn;
	};
} __packed;

struct esca_entry {
	union esca_sigp_ctrl sigp_ctrl;
	__u16   reserved1[3];
	__u64   sda;
	__u64   reserved2[6];
} __packed;

struct bsca_entry {
	__u8	reserved0;
	union bsca_sigp_ctrl	sigp_ctrl;
	__u16	reserved[3];
	__u64	sda;
	__u64	reserved2[2];
} __attribute__((packed));

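/* IPTE interlock control word; sits at the start of the SCA */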
union ipte_control {
	unsigned long val;
	struct {
		unsigned long k  : 1;
		unsigned long kh : 31;
		unsigned long kg : 32;
	};
};

struct bsca_block {
	union ipte_control ipte_control;
	__u64	reserved[5];
	__u64	mcn;
	__u64	reserved2;
	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
} __attribute__((packed));

struct esca_block {
	union ipte_control ipte_control;
	__u64   reserved1[7];
	__u64   mcn[4];
	__u64   reserved2[20];
	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
} __packed;

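/* flags for the atomic cpuflags field at offset 0 of the SIE block */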
#define CPUSTAT_STOPPED    0x80000000
#define CPUSTAT_WAIT       0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT   0x04000000
#define CPUSTAT_IO_INT     0x02000000
#define CPUSTAT_EXT_INT    0x01000000
#define CPUSTAT_RUNNING    0x00800000
#define CPUSTAT_RETAINED   0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB    0x00010000
#define CPUSTAT_RRF        0x00008000
#define CPUSTAT_SLSV       0x00004000
#define CPUSTAT_SLSR       0x00002000
#define CPUSTAT_ZARCH      0x00000800
#define CPUSTAT_MCDS       0x00000100
#define CPUSTAT_SM         0x00000080
#define CPUSTAT_IBS        0x00000040
#define CPUSTAT_GED2       0x00000010
#define CPUSTAT_G          0x00000008
#define CPUSTAT_GED        0x00000004
#define CPUSTAT_J          0x00000002
#define CPUSTAT_P          0x00000001

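/*
 * SIE control block (state description); the layout is defined by the
 * hardware and the hex comments give each field's byte offset.
 */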
struct kvm_s390_sie_block {
	atomic_t cpuflags;		/* 0x0000 */
	__u32 : 1;			/* 0x0004 */
	__u32 prefix : 18;
	__u32 : 1;
	__u32 ibc : 12;
	__u8	reserved08[4];		/* 0x0008 */
#define PROG_IN_SIE (1<<0)
	__u32	prog0c;			/* 0x000c */
	__u8	reserved10[16];		/* 0x0010 */
#define PROG_BLOCK_SIE	(1<<0)
#define PROG_REQUEST	(1<<1)
	atomic_t prog20;		/* 0x0020 */
	__u8	reserved24[4];		/* 0x0024 */
	__u64	cputm;			/* 0x0028 */
	__u64	ckc;			/* 0x0030 */
	__u64	epoch;			/* 0x0038 */
	__u8	reserved40[4];		/* 0x0040 */
#define LCTL_CR0	0x8000
#define LCTL_CR6	0x0200
#define LCTL_CR9	0x0040
#define LCTL_CR10	0x0020
#define LCTL_CR11	0x0010
#define LCTL_CR14	0x0002
	__u16   lctl;			/* 0x0044 */
	__s16	icpua;			/* 0x0046 */
#define ICTL_PINT	0x20000000
#define ICTL_LPSW	0x00400000
#define ICTL_STCTL	0x00040000
#define ICTL_ISKE	0x00004000
#define ICTL_SSKE	0x00002000
#define ICTL_RRBE	0x00001000
#define ICTL_TPROT	0x00000200
	__u32	ictl;			/* 0x0048 */
	__u32	eca;			/* 0x004c */
#define ICPT_INST	0x04
#define ICPT_PROGI	0x08
#define ICPT_INSTPROGI	0x0C
#define ICPT_OPEREXC	0x2C
#define ICPT_PARTEXEC	0x38
#define ICPT_IOINST	0x40
	__u8	icptcode;		/* 0x0050 */
	__u8	icptstatus;		/* 0x0051 */
	__u16	ihcpu;			/* 0x0052 */
	__u8	reserved54[2];		/* 0x0054 */
	__u16	ipa;			/* 0x0056 */
	__u32	ipb;			/* 0x0058 */
	__u32	scaoh;			/* 0x005c */
	__u8	reserved60;		/* 0x0060 */
	__u8	ecb;			/* 0x0061 */
	__u8    ecb2;                   /* 0x0062 */
#define ECB3_AES 0x04
#define ECB3_DEA 0x08
	__u8    ecb3;			/* 0x0063 */
	__u32	scaol;			/* 0x0064 */
	__u8	reserved68[4];		/* 0x0068 */
	__u32	todpr;			/* 0x006c */
	__u8	reserved70[32];		/* 0x0070 */
	psw_t	gpsw;			/* 0x0090 */
	__u64	gg14;			/* 0x00a0 */
	__u64	gg15;			/* 0x00a8 */
	__u8	reservedb0[20];		/* 0x00b0 */
	__u16	extcpuaddr;		/* 0x00c4 */
	__u16	eic;			/* 0x00c6 */
	__u32	reservedc8;		/* 0x00c8 */
	__u16	pgmilc;			/* 0x00cc */
	__u16	iprcc;			/* 0x00ce */
	__u32	dxc;			/* 0x00d0 */
	__u16	mcn;			/* 0x00d4 */
	__u8	perc;			/* 0x00d6 */
	__u8	peratmid;		/* 0x00d7 */
	__u64	peraddr;		/* 0x00d8 */
	__u8	eai;			/* 0x00e0 */
	__u8	peraid;			/* 0x00e1 */
	__u8	oai;			/* 0x00e2 */
	__u8	armid;			/* 0x00e3 */
	__u8	reservede4[4];		/* 0x00e4 */
	__u64	tecmc;			/* 0x00e8 */
	__u8	reservedf0[12];		/* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
	__u32	crycbd;			/* 0x00fc */
	__u64	gcr[16];		/* 0x0100 */
	__u64	gbea;			/* 0x0180 */
	__u8	reserved188[24];	/* 0x0188 */
	__u32	fac;			/* 0x01a0 */
	__u8	reserved1a4[20];	/* 0x01a4 */
	__u64	cbrlo;			/* 0x01b8 */
	__u8	reserved1c0[8];		/* 0x01c0 */
	__u32	ecd;			/* 0x01c8 */
	__u8	reserved1cc[18];	/* 0x01cc */
	__u64	pp;			/* 0x01de */
	__u8	reserved1e6[2];		/* 0x01e6 */
	__u64	itdba;			/* 0x01e8 */
	__u8	reserved1f0[16];	/* 0x01f0 */
} __attribute__((packed));

struct kvm_s390_itdb {
	__u8	data[256];
} __packed;

struct kvm_s390_vregs {
	__vector128 vrs[32];
	__u8	reserved200[512];	/* for future vector expansion */
} __packed;

struct sie_page {
	struct kvm_s390_sie_block sie_block;
	__u8 reserved200[1024];		/* 0x0200 */
	struct kvm_s390_itdb itdb;	/* 0x0600 */
	__u8 reserved700[1280];		/* 0x0700 */
	struct kvm_s390_vregs vregs;	/* 0x0c00 */
} __packed;

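/* per-vcpu event counters, exported via debugfs */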
struct kvm_vcpu_stat {
	u32 exit_userspace;
	u32 exit_null;
	u32 exit_external_request;
	u32 exit_external_interrupt;
	u32 exit_stop_request;
	u32 exit_validity;
	u32 exit_instruction;
	u32 halt_successful_poll;
	u32 halt_attempted_poll;
	u32 halt_wakeup;
	u32 instruction_lctl;
	u32 instruction_lctlg;
	u32 instruction_stctl;
	u32 instruction_stctg;
	u32 exit_program_interruption;
	u32 exit_instr_and_program;
	u32 deliver_external_call;
	u32 deliver_emergency_signal;
	u32 deliver_service_signal;
	u32 deliver_virtio_interrupt;
	u32 deliver_stop_signal;
	u32 deliver_prefix_signal;
	u32 deliver_restart_signal;
	u32 deliver_program_int;
	u32 deliver_io_int;
	u32 exit_wait_state;
	u32 instruction_pfmf;
	u32 instruction_stidp;
	u32 instruction_spx;
	u32 instruction_stpx;
	u32 instruction_stap;
	u32 instruction_storage_key;
	u32 instruction_ipte_interlock;
	u32 instruction_stsch;
	u32 instruction_chsc;
	u32 instruction_stsi;
	u32 instruction_stfl;
	u32 instruction_tprot;
	u32 instruction_essa;
	u32 instruction_sigp_sense;
	u32 instruction_sigp_sense_running;
	u32 instruction_sigp_external_call;
	u32 instruction_sigp_emergency;
	u32 instruction_sigp_cond_emergency;
	u32 instruction_sigp_start;
	u32 instruction_sigp_stop;
	u32 instruction_sigp_stop_store_status;
	u32 instruction_sigp_store_status;
	u32 instruction_sigp_store_adtl_status;
	u32 instruction_sigp_arch;
	u32 instruction_sigp_prefix;
	u32 instruction_sigp_restart;
	u32 instruction_sigp_init_cpu_reset;
	u32 instruction_sigp_cpu_reset;
	u32 instruction_sigp_unknown;
	u32 diagnose_10;
	u32 diagnose_44;
	u32 diagnose_9c;
	u32 diagnose_258;
	u32 diagnose_308;
	u32 diagnose_500;
};

#define PGM_OPERATION			0x01
#define PGM_PRIVILEGED_OP		0x02
#define PGM_EXECUTE			0x03
#define PGM_PROTECTION			0x04
#define PGM_ADDRESSING			0x05
#define PGM_SPECIFICATION		0x06
#define PGM_DATA			0x07
#define PGM_FIXED_POINT_OVERFLOW	0x08
#define PGM_FIXED_POINT_DIVIDE		0x09
#define PGM_DECIMAL_OVERFLOW		0x0a
#define PGM_DECIMAL_DIVIDE		0x0b
#define PGM_HFP_EXPONENT_OVERFLOW	0x0c
#define PGM_HFP_EXPONENT_UNDERFLOW	0x0d
#define PGM_HFP_SIGNIFICANCE		0x0e
#define PGM_HFP_DIVIDE			0x0f
#define PGM_SEGMENT_TRANSLATION		0x10
#define PGM_PAGE_TRANSLATION		0x11
#define PGM_TRANSLATION_SPEC		0x12
#define PGM_SPECIAL_OPERATION		0x13
#define PGM_OPERAND			0x15
#define PGM_TRACE_TABEL			0x16
#define PGM_VECTOR_PROCESSING		0x1b
#define PGM_SPACE_SWITCH		0x1c
#define PGM_HFP_SQUARE_ROOT		0x1d
#define PGM_PC_TRANSLATION_SPEC		0x1f
#define PGM_AFX_TRANSLATION		0x20
#define PGM_ASX_TRANSLATION		0x21
#define PGM_LX_TRANSLATION		0x22
#define PGM_EX_TRANSLATION		0x23
#define PGM_PRIMARY_AUTHORITY		0x24
#define PGM_SECONDARY_AUTHORITY		0x25
#define PGM_LFX_TRANSLATION		0x26
#define PGM_LSX_TRANSLATION		0x27
#define PGM_ALET_SPECIFICATION		0x28
#define PGM_ALEN_TRANSLATION		0x29
#define PGM_ALE_SEQUENCE		0x2a
#define PGM_ASTE_VALIDITY		0x2b
#define PGM_ASTE_SEQUENCE		0x2c
#define PGM_EXTENDED_AUTHORITY		0x2d
#define PGM_LSTE_SEQUENCE		0x2e
#define PGM_ASTE_INSTANCE		0x2f
#define PGM_STACK_FULL			0x30
#define PGM_STACK_EMPTY			0x31
#define PGM_STACK_SPECIFICATION		0x32
#define PGM_STACK_TYPE			0x33
#define PGM_STACK_OPERATION		0x34
#define PGM_ASCE_TYPE			0x38
#define PGM_REGION_FIRST_TRANS		0x39
#define PGM_REGION_SECOND_TRANS		0x3a
#define PGM_REGION_THIRD_TRANS		0x3b
#define PGM_MONITOR			0x40
#define PGM_PER				0x80
#define PGM_CRYPTO_OPERATION		0x119

/* irq types in order of priority */
enum irq_types {
	IRQ_PEND_MCHK_EX = 0,
	IRQ_PEND_SVC,
	IRQ_PEND_PROG,
	IRQ_PEND_MCHK_REP,
	IRQ_PEND_EXT_IRQ_KEY,
	IRQ_PEND_EXT_MALFUNC,
	IRQ_PEND_EXT_EMERGENCY,
	IRQ_PEND_EXT_EXTERNAL,
	IRQ_PEND_EXT_CLOCK_COMP,
	IRQ_PEND_EXT_CPU_TIMER,
	IRQ_PEND_EXT_TIMING,
	IRQ_PEND_EXT_SERVICE,
	IRQ_PEND_EXT_HOST,
	IRQ_PEND_PFAULT_INIT,
	IRQ_PEND_PFAULT_DONE,
	IRQ_PEND_VIRTIO,
	IRQ_PEND_IO_ISC_0,
	IRQ_PEND_IO_ISC_1,
	IRQ_PEND_IO_ISC_2,
	IRQ_PEND_IO_ISC_3,
	IRQ_PEND_IO_ISC_4,
	IRQ_PEND_IO_ISC_5,
	IRQ_PEND_IO_ISC_6,
	IRQ_PEND_IO_ISC_7,
	IRQ_PEND_SIGP_STOP,
	IRQ_PEND_RESTART,
	IRQ_PEND_SET_PREFIX,
	IRQ_PEND_COUNT
};

/* We have 2M for virtio device descriptor pages. Smallest amount of
 * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
 */
#define KVM_S390_MAX_VIRTIO_IRQS 87381

/*
 * Repressible (non-floating) machine check interrupts
 * subclass bits in MCIC
 */
#define MCHK_EXTD_BIT 58
#define MCHK_DEGR_BIT 56
#define MCHK_WARN_BIT 55
#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
		       (1UL << MCHK_EXTD_BIT) | \
		       (1UL << MCHK_WARN_BIT))

/* Exigent machine check interrupts subclass bits in MCIC */
#define MCHK_SD_BIT 63
#define MCHK_PD_BIT 62
#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))

#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY)    | \
			   (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
			   (1UL << IRQ_PEND_EXT_CPU_TIMER)  | \
			   (1UL << IRQ_PEND_EXT_MALFUNC)    | \
			   (1UL << IRQ_PEND_EXT_EMERGENCY)  | \
			   (1UL << IRQ_PEND_EXT_EXTERNAL)   | \
			   (1UL << IRQ_PEND_EXT_TIMING)     | \
			   (1UL << IRQ_PEND_EXT_HOST)       | \
			   (1UL << IRQ_PEND_EXT_SERVICE)    | \
			   (1UL << IRQ_PEND_VIRTIO)         | \
			   (1UL << IRQ_PEND_PFAULT_INIT)    | \
			   (1UL << IRQ_PEND_PFAULT_DONE))

#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
			  (1UL << IRQ_PEND_IO_ISC_1) | \
			  (1UL << IRQ_PEND_IO_ISC_2) | \
			  (1UL << IRQ_PEND_IO_ISC_3) | \
			  (1UL << IRQ_PEND_IO_ISC_4) | \
			  (1UL << IRQ_PEND_IO_ISC_5) | \
			  (1UL << IRQ_PEND_IO_ISC_6) | \
			  (1UL << IRQ_PEND_IO_ISC_7))

#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
			    (1UL << IRQ_PEND_MCHK_EX))

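/* a single queued floating interrupt and its payload */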
struct kvm_s390_interrupt_info {
	struct list_head list;
	u64	type;
	union {
		struct kvm_s390_io_info io;
		struct kvm_s390_ext_info ext;
		struct kvm_s390_pgm_info pgm;
		struct kvm_s390_emerg_info emerg;
		struct kvm_s390_extcall_info extcall;
		struct kvm_s390_prefix_info prefix;
		struct kvm_s390_stop_info stop;
		struct kvm_s390_mchk_info mchk;
	};
};

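/* payloads of the currently pending local interrupts */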
struct kvm_s390_irq_payload {
	struct kvm_s390_io_info io;
	struct kvm_s390_ext_info ext;
	struct kvm_s390_pgm_info pgm;
	struct kvm_s390_emerg_info emerg;
	struct kvm_s390_extcall_info extcall;
	struct kvm_s390_prefix_info prefix;
	struct kvm_s390_stop_info stop;
	struct kvm_s390_mchk_info mchk;
};

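/* per-vcpu (local) interrupt state */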
struct kvm_s390_local_interrupt {
	spinlock_t lock;
	struct kvm_s390_float_interrupt *float_int;
	wait_queue_head_t *wq;
	atomic_t *cpuflags;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_irq_payload irq;
	unsigned long pending_irqs;
};

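/* list and counter indices used by struct kvm_s390_float_interrupt */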
#define FIRQ_LIST_IO_ISC_0 0
#define FIRQ_LIST_IO_ISC_1 1
#define FIRQ_LIST_IO_ISC_2 2
#define FIRQ_LIST_IO_ISC_3 3
#define FIRQ_LIST_IO_ISC_4 4
#define FIRQ_LIST_IO_ISC_5 5
#define FIRQ_LIST_IO_ISC_6 6
#define FIRQ_LIST_IO_ISC_7 7
#define FIRQ_LIST_PFAULT   8
#define FIRQ_LIST_VIRTIO   9
#define FIRQ_LIST_COUNT   10
#define FIRQ_CNTR_IO       0
#define FIRQ_CNTR_SERVICE  1
#define FIRQ_CNTR_VIRTIO   2
#define FIRQ_CNTR_PFAULT   3
#define FIRQ_MAX_COUNT     4

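/* machine-wide (floating) interrupt state, shared by all vcpus */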
struct kvm_s390_float_interrupt {
	unsigned long pending_irqs;
	spinlock_t lock;
	struct list_head lists[FIRQ_LIST_COUNT];
	int counters[FIRQ_MAX_COUNT];
	struct kvm_s390_mchk_info mchk;
	struct kvm_s390_ext_info srv_signal;
	int next_rr_cpu;
	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
};

struct kvm_hw_wp_info_arch {
	unsigned long addr;
	unsigned long phys_addr;
	int len;
	char *old_data;
};

struct kvm_hw_bp_info_arch {
	unsigned long addr;
	int len;
};

/*
 * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
 * Further KVM_GUESTDBG flags which an be used from userspace can be found in
 * arch/s390/include/uapi/asm/kvm.h
 */
#define KVM_GUESTDBG_EXIT_PENDING 0x10000000

#define guestdbg_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
#define guestdbg_sstep_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
#define guestdbg_hw_bp_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))

struct kvm_guestdbg_info_arch {
	unsigned long cr0;
	unsigned long cr9;
	unsigned long cr10;
	unsigned long cr11;
	struct kvm_hw_bp_info_arch *hw_bp_info;
	struct kvm_hw_wp_info_arch *hw_wp_info;
	int nr_hw_bp;
	int nr_hw_wp;
	unsigned long last_bp;
};

struct kvm_vcpu_arch {
	struct kvm_s390_sie_block *sie_block;
	unsigned int      host_acrs[NUM_ACRS];
	struct fpu	  host_fpregs;
	struct fpu	  guest_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer    ckc_timer;
	struct kvm_s390_pgm_info pgm;
	union  {
		struct cpuid	cpu_id;
		u64		stidp_data;
	};
	struct gmap *gmap;
	struct kvm_guestdbg_info_arch guestdbg;
	unsigned long pfault_token;
	unsigned long pfault_select;
	unsigned long pfault_compare;
};

struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

struct kvm_arch_memory_slot {
};

struct s390_map_info {
	struct list_head list;
	__u64 guest_addr;
	__u64 addr;
	struct page *page;
};

struct s390_io_adapter {
	unsigned int id;
	int isc;
	bool maskable;
	bool masked;
	bool swap;
	struct rw_semaphore maps_lock;
	struct list_head maps;
	atomic_t nr_maps;
};

#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256

/* maximum size of facilities and facility mask is 2k bytes */
#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
#define S390_ARCH_FAC_LIST_SIZE_U64 \
	(S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
#define S390_ARCH_FAC_MASK_SIZE_U64 \
	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))

struct kvm_s390_fac {
	/* facility list requested by guest */
	__u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
	/* facility mask supported by kvm & hosting machine */
	__u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
};

struct kvm_s390_cpu_model {
	struct kvm_s390_fac *fac;
	struct cpuid cpu_id;
	unsigned short ibc;
};

struct kvm_s390_crypto {
	struct kvm_s390_crypto_cb *crycb;
	__u32 crycbd;
	__u8 aes_kw;
	__u8 dea_kw;
};

struct kvm_s390_crypto_cb {
	__u8    reserved00[72];                 /* 0x0000 */
	__u8    dea_wrapping_key_mask[24];      /* 0x0048 */
	__u8    aes_wrapping_key_mask[32];      /* 0x0060 */
	__u8    reserved80[128];                /* 0x0080 */
};

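/* architecture specific per-vm state */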
struct kvm_arch {
	void *sca;
	int use_esca;
	rwlock_t sca_lock;
	debug_info_t *dbf;
	struct kvm_s390_float_interrupt float_int;
	struct kvm_device *flic;
	struct gmap *gmap;
	int css_support;
	int use_irqchip;
	int use_cmma;
	int user_cpu_state_ctrl;
	int user_sigp;
	int user_stsi;
	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
	wait_queue_head_t ipte_wq;
	int ipte_lock_count;
	struct mutex ipte_mutex;
	spinlock_t start_stop_lock;
	struct kvm_s390_cpu_model model;
	struct kvm_s390_crypto crypto;
	u64 epoch;
};

#define KVM_HVA_ERR_BAD		(-1UL)
#define KVM_HVA_ERR_RO_BAD	(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

#define ASYNC_PF_PER_VCPU	64
struct kvm_arch_async_pf {
	unsigned long pfault_token;
};

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);

extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

#endif