kvm_host.h
/*
 * definition for kernel virtual machines on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */


#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H

#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/seqlock.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>

#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS 255
#define KVM_USER_MEM_SLOTS 32

/*
 * These seem to be used for allocating ->chip in the routing table,
 * which we don't use. 4096 is an out-of-thin-air value. If we need
 * to look at ->chip later on, we'll need to revisit this.
 */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 4096
#define KVM_HALT_POLL_NS_DEFAULT 80000

/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS         8
#define KVM_REQ_DISABLE_IBS        9
#define KVM_REQ_ICPT_OPEREXC       10

#define SIGP_CTRL_C		0x80
#define SIGP_CTRL_SCN_MASK	0x3f

union bsca_sigp_ctrl {
	__u8 value;
	struct {
		__u8 c : 1;
		__u8 r : 1;
		__u8 scn : 6;
	};
} __packed;

union esca_sigp_ctrl {
	__u16 value;
	struct {
		__u8 c : 1;
		__u8 reserved: 7;
		__u8 scn;
	};
} __packed;
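
/*
 * Illustrative helper, not part of the original header: on s390 the
 * bit-fields above are allocated from the most significant bit, so the c
 * bit of bsca_sigp_ctrl corresponds to the SIGP_CTRL_C mask and scn to
 * SIGP_CTRL_SCN_MASK. The helper name is hypothetical.
 */
static inline int bsca_sigp_ctrl_pending(union bsca_sigp_ctrl sigp_ctrl)
{
	/* equivalent to reading the c bit-field */
	return (sigp_ctrl.value & SIGP_CTRL_C) != 0;
}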

struct esca_entry {
	union esca_sigp_ctrl sigp_ctrl;
	__u16   reserved1[3];
	__u64   sda;
	__u64   reserved2[6];
} __packed;

struct bsca_entry {
	__u8	reserved0;
	union bsca_sigp_ctrl	sigp_ctrl;
	__u16	reserved[3];
	__u64	sda;
	__u64	reserved2[2];
} __attribute__((packed));

union ipte_control {
	unsigned long val;
	struct {
		unsigned long k  : 1;
		unsigned long kh : 31;
		unsigned long kg : 32;
	};
};

struct bsca_block {
	union ipte_control ipte_control;
	__u64	reserved[5];
	__u64	mcn;
	__u64	reserved2;
	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
} __attribute__((packed));

struct esca_block {
	union ipte_control ipte_control;
	__u64   reserved1[7];
	__u64   mcn[4];
	__u64   reserved2[20];
	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
} __packed;

#define CPUSTAT_STOPPED    0x80000000
#define CPUSTAT_WAIT       0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
#define CPUSTAT_STOP_INT   0x04000000
#define CPUSTAT_IO_INT     0x02000000
#define CPUSTAT_EXT_INT    0x01000000
#define CPUSTAT_RUNNING    0x00800000
#define CPUSTAT_RETAINED   0x00400000
#define CPUSTAT_TIMING_SUB 0x00020000
#define CPUSTAT_SIE_SUB    0x00010000
#define CPUSTAT_RRF        0x00008000
#define CPUSTAT_SLSV       0x00004000
#define CPUSTAT_SLSR       0x00002000
#define CPUSTAT_ZARCH      0x00000800
#define CPUSTAT_MCDS       0x00000100
#define CPUSTAT_SM         0x00000080
#define CPUSTAT_IBS        0x00000040
#define CPUSTAT_GED2       0x00000010
#define CPUSTAT_G          0x00000008
#define CPUSTAT_GED        0x00000004
#define CPUSTAT_J          0x00000002
#define CPUSTAT_P          0x00000001

struct kvm_s390_sie_block {
	atomic_t cpuflags;		/* 0x0000 */
	__u32 : 1;			/* 0x0004 */
	__u32 prefix : 18;
	__u32 : 1;
	__u32 ibc : 12;
	__u8	reserved08[4];		/* 0x0008 */
#define PROG_IN_SIE (1<<0)
	__u32	prog0c;			/* 0x000c */
	__u8	reserved10[16];		/* 0x0010 */
#define PROG_BLOCK_SIE	(1<<0)
#define PROG_REQUEST	(1<<1)
	atomic_t prog20;		/* 0x0020 */
	__u8	reserved24[4];		/* 0x0024 */
	__u64	cputm;			/* 0x0028 */
	__u64	ckc;			/* 0x0030 */
	__u64	epoch;			/* 0x0038 */
	__u32	svcc;			/* 0x0040 */
#define LCTL_CR0	0x8000
#define LCTL_CR6	0x0200
#define LCTL_CR9	0x0040
#define LCTL_CR10	0x0020
#define LCTL_CR11	0x0010
#define LCTL_CR14	0x0002
	__u16   lctl;			/* 0x0044 */
	__s16	icpua;			/* 0x0046 */
#define ICTL_OPEREXC	0x80000000
#define ICTL_PINT	0x20000000
#define ICTL_LPSW	0x00400000
#define ICTL_STCTL	0x00040000
#define ICTL_ISKE	0x00004000
#define ICTL_SSKE	0x00002000
#define ICTL_RRBE	0x00001000
#define ICTL_TPROT	0x00000200
	__u32	ictl;			/* 0x0048 */
#define ECA_CEI		0x80000000
#define ECA_IB		0x40000000
#define ECA_SIGPI	0x10000000
#define ECA_MVPGI	0x01000000
#define ECA_VX		0x00020000
#define ECA_PROTEXCI	0x00002000
#define ECA_SII		0x00000001
	__u32	eca;			/* 0x004c */
#define ICPT_INST	0x04
#define ICPT_PROGI	0x08
#define ICPT_INSTPROGI	0x0C
#define ICPT_EXTINT	0x14
#define ICPT_VALIDITY	0x20
#define ICPT_STOP	0x28
#define ICPT_OPEREXC	0x2C
#define ICPT_PARTEXEC	0x38
#define ICPT_IOINST	0x40
	__u8	icptcode;		/* 0x0050 */
	__u8	icptstatus;		/* 0x0051 */
	__u16	ihcpu;			/* 0x0052 */
	__u8	reserved54[2];		/* 0x0054 */
	__u16	ipa;			/* 0x0056 */
	__u32	ipb;			/* 0x0058 */
	__u32	scaoh;			/* 0x005c */
	__u8	reserved60;		/* 0x0060 */
#define ECB_TE		0x10
#define ECB_SRSI	0x04
#define ECB_HOSTPROTINT	0x02
	__u8	ecb;			/* 0x0061 */
#define ECB2_CMMA	0x80
#define ECB2_IEP	0x20
#define ECB2_PFMFI	0x08
#define ECB2_ESCA	0x04
	__u8    ecb2;                   /* 0x0062 */
#define ECB3_DEA 0x08
#define ECB3_AES 0x04
#define ECB3_RI  0x01
	__u8    ecb3;			/* 0x0063 */
	__u32	scaol;			/* 0x0064 */
	__u8	reserved68[4];		/* 0x0068 */
	__u32	todpr;			/* 0x006c */
	__u8	reserved70[16];		/* 0x0070 */
	__u64	mso;			/* 0x0080 */
	__u64	msl;			/* 0x0088 */
	psw_t	gpsw;			/* 0x0090 */
	__u64	gg14;			/* 0x00a0 */
	__u64	gg15;			/* 0x00a8 */
	__u8	reservedb0[20];		/* 0x00b0 */
	__u16	extcpuaddr;		/* 0x00c4 */
	__u16	eic;			/* 0x00c6 */
	__u32	reservedc8;		/* 0x00c8 */
	__u16	pgmilc;			/* 0x00cc */
	__u16	iprcc;			/* 0x00ce */
	__u32	dxc;			/* 0x00d0 */
	__u16	mcn;			/* 0x00d4 */
	__u8	perc;			/* 0x00d6 */
	__u8	peratmid;		/* 0x00d7 */
	__u64	peraddr;		/* 0x00d8 */
	__u8	eai;			/* 0x00e0 */
	__u8	peraid;			/* 0x00e1 */
	__u8	oai;			/* 0x00e2 */
	__u8	armid;			/* 0x00e3 */
	__u8	reservede4[4];		/* 0x00e4 */
	__u64	tecmc;			/* 0x00e8 */
	__u8	reservedf0[12];		/* 0x00f0 */
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
	__u32	crycbd;			/* 0x00fc */
	__u64	gcr[16];		/* 0x0100 */
	__u64	gbea;			/* 0x0180 */
	__u8	reserved188[24];	/* 0x0188 */
	__u32	fac;			/* 0x01a0 */
	__u8	reserved1a4[20];	/* 0x01a4 */
	__u64	cbrlo;			/* 0x01b8 */
	__u8	reserved1c0[8];		/* 0x01c0 */
#define ECD_HOSTREGMGMT	0x20000000
	__u32	ecd;			/* 0x01c8 */
	__u8	reserved1cc[18];	/* 0x01cc */
	__u64	pp;			/* 0x01de */
	__u8	reserved1e6[2];		/* 0x01e6 */
	__u64	itdba;			/* 0x01e8 */
	__u64   riccbd;			/* 0x01f0 */
	__u64	gvrd;			/* 0x01f8 */
} __attribute__((packed));

struct kvm_s390_itdb {
	__u8	data[256];
} __packed;

struct sie_page {
	struct kvm_s390_sie_block sie_block;
	__u8 reserved200[1024];		/* 0x0200 */
	struct kvm_s390_itdb itdb;	/* 0x0600 */
	__u8 reserved700[2304];		/* 0x0700 */
} __packed;

struct kvm_vcpu_stat {
	u64 exit_userspace;
	u64 exit_null;
	u64 exit_external_request;
	u64 exit_external_interrupt;
	u64 exit_stop_request;
	u64 exit_validity;
	u64 exit_instruction;
	u64 exit_pei;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 instruction_lctl;
	u64 instruction_lctlg;
	u64 instruction_stctl;
	u64 instruction_stctg;
	u64 exit_program_interruption;
	u64 exit_instr_and_program;
	u64 exit_operation_exception;
	u64 deliver_external_call;
	u64 deliver_emergency_signal;
	u64 deliver_service_signal;
	u64 deliver_virtio_interrupt;
	u64 deliver_stop_signal;
	u64 deliver_prefix_signal;
	u64 deliver_restart_signal;
	u64 deliver_program_int;
	u64 deliver_io_int;
	u64 exit_wait_state;
	u64 instruction_pfmf;
	u64 instruction_stidp;
	u64 instruction_spx;
	u64 instruction_stpx;
	u64 instruction_stap;
	u64 instruction_storage_key;
	u64 instruction_ipte_interlock;
	u64 instruction_stsch;
	u64 instruction_chsc;
	u64 instruction_stsi;
	u64 instruction_stfl;
	u64 instruction_tprot;
	u64 instruction_sie;
	u64 instruction_essa;
	u64 instruction_sthyi;
	u64 instruction_sigp_sense;
	u64 instruction_sigp_sense_running;
	u64 instruction_sigp_external_call;
	u64 instruction_sigp_emergency;
	u64 instruction_sigp_cond_emergency;
	u64 instruction_sigp_start;
	u64 instruction_sigp_stop;
	u64 instruction_sigp_stop_store_status;
	u64 instruction_sigp_store_status;
	u64 instruction_sigp_store_adtl_status;
	u64 instruction_sigp_arch;
	u64 instruction_sigp_prefix;
	u64 instruction_sigp_restart;
	u64 instruction_sigp_init_cpu_reset;
	u64 instruction_sigp_cpu_reset;
	u64 instruction_sigp_unknown;
	u64 diagnose_10;
	u64 diagnose_44;
	u64 diagnose_9c;
	u64 diagnose_258;
	u64 diagnose_308;
	u64 diagnose_500;
};

#define PGM_OPERATION			0x01
#define PGM_PRIVILEGED_OP		0x02
#define PGM_EXECUTE			0x03
#define PGM_PROTECTION			0x04
#define PGM_ADDRESSING			0x05
#define PGM_SPECIFICATION		0x06
#define PGM_DATA			0x07
#define PGM_FIXED_POINT_OVERFLOW	0x08
#define PGM_FIXED_POINT_DIVIDE		0x09
#define PGM_DECIMAL_OVERFLOW		0x0a
#define PGM_DECIMAL_DIVIDE		0x0b
#define PGM_HFP_EXPONENT_OVERFLOW	0x0c
#define PGM_HFP_EXPONENT_UNDERFLOW	0x0d
#define PGM_HFP_SIGNIFICANCE		0x0e
#define PGM_HFP_DIVIDE			0x0f
#define PGM_SEGMENT_TRANSLATION		0x10
#define PGM_PAGE_TRANSLATION		0x11
#define PGM_TRANSLATION_SPEC		0x12
#define PGM_SPECIAL_OPERATION		0x13
#define PGM_OPERAND			0x15
#define PGM_TRACE_TABEL			0x16
#define PGM_VECTOR_PROCESSING		0x1b
#define PGM_SPACE_SWITCH		0x1c
#define PGM_HFP_SQUARE_ROOT		0x1d
#define PGM_PC_TRANSLATION_SPEC		0x1f
#define PGM_AFX_TRANSLATION		0x20
#define PGM_ASX_TRANSLATION		0x21
#define PGM_LX_TRANSLATION		0x22
#define PGM_EX_TRANSLATION		0x23
#define PGM_PRIMARY_AUTHORITY		0x24
#define PGM_SECONDARY_AUTHORITY		0x25
#define PGM_LFX_TRANSLATION		0x26
#define PGM_LSX_TRANSLATION		0x27
#define PGM_ALET_SPECIFICATION		0x28
#define PGM_ALEN_TRANSLATION		0x29
#define PGM_ALE_SEQUENCE		0x2a
#define PGM_ASTE_VALIDITY		0x2b
#define PGM_ASTE_SEQUENCE		0x2c
#define PGM_EXTENDED_AUTHORITY		0x2d
#define PGM_LSTE_SEQUENCE		0x2e
#define PGM_ASTE_INSTANCE		0x2f
#define PGM_STACK_FULL			0x30
#define PGM_STACK_EMPTY			0x31
#define PGM_STACK_SPECIFICATION		0x32
#define PGM_STACK_TYPE			0x33
#define PGM_STACK_OPERATION		0x34
#define PGM_ASCE_TYPE			0x38
#define PGM_REGION_FIRST_TRANS		0x39
#define PGM_REGION_SECOND_TRANS		0x3a
#define PGM_REGION_THIRD_TRANS		0x3b
#define PGM_MONITOR			0x40
#define PGM_PER				0x80
#define PGM_CRYPTO_OPERATION		0x119

/* irq types in order of priority */
enum irq_types {
	IRQ_PEND_MCHK_EX = 0,
	IRQ_PEND_SVC,
	IRQ_PEND_PROG,
	IRQ_PEND_MCHK_REP,
	IRQ_PEND_EXT_IRQ_KEY,
	IRQ_PEND_EXT_MALFUNC,
	IRQ_PEND_EXT_EMERGENCY,
	IRQ_PEND_EXT_EXTERNAL,
	IRQ_PEND_EXT_CLOCK_COMP,
	IRQ_PEND_EXT_CPU_TIMER,
	IRQ_PEND_EXT_TIMING,
	IRQ_PEND_EXT_SERVICE,
	IRQ_PEND_EXT_HOST,
	IRQ_PEND_PFAULT_INIT,
	IRQ_PEND_PFAULT_DONE,
	IRQ_PEND_VIRTIO,
	IRQ_PEND_IO_ISC_0,
	IRQ_PEND_IO_ISC_1,
	IRQ_PEND_IO_ISC_2,
	IRQ_PEND_IO_ISC_3,
	IRQ_PEND_IO_ISC_4,
	IRQ_PEND_IO_ISC_5,
	IRQ_PEND_IO_ISC_6,
	IRQ_PEND_IO_ISC_7,
	IRQ_PEND_SIGP_STOP,
	IRQ_PEND_RESTART,
	IRQ_PEND_SET_PREFIX,
	IRQ_PEND_COUNT
};

/* We have 2M for virtio device descriptor pages. Smallest amount of
 * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
 */
#define KVM_S390_MAX_VIRTIO_IRQS 87381
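
/*
 * Illustrative sanity check, not part of the original header: the
 * truncating division from the comment above can be verified at compile
 * time with a C11 static assertion.
 */
_Static_assert(KVM_S390_MAX_VIRTIO_IRQS == (2048 * 1024) / 24,
	       "KVM_S390_MAX_VIRTIO_IRQS derivation");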

/*
 * Repressible (non-floating) machine check interrupts
 * subclass bits in MCIC
 */
#define MCHK_EXTD_BIT 58
#define MCHK_DEGR_BIT 56
#define MCHK_WARN_BIT 55
#define MCHK_REP_MASK ((1UL << MCHK_DEGR_BIT) | \
		       (1UL << MCHK_EXTD_BIT) | \
		       (1UL << MCHK_WARN_BIT))

/* Exigent machine check interrupts subclass bits in MCIC */
#define MCHK_SD_BIT 63
#define MCHK_PD_BIT 62
#define MCHK_EX_MASK ((1UL << MCHK_SD_BIT) | (1UL << MCHK_PD_BIT))

#define IRQ_PEND_EXT_MASK ((1UL << IRQ_PEND_EXT_IRQ_KEY)    | \
			   (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
			   (1UL << IRQ_PEND_EXT_CPU_TIMER)  | \
			   (1UL << IRQ_PEND_EXT_MALFUNC)    | \
			   (1UL << IRQ_PEND_EXT_EMERGENCY)  | \
			   (1UL << IRQ_PEND_EXT_EXTERNAL)   | \
			   (1UL << IRQ_PEND_EXT_TIMING)     | \
			   (1UL << IRQ_PEND_EXT_HOST)       | \
			   (1UL << IRQ_PEND_EXT_SERVICE)    | \
			   (1UL << IRQ_PEND_VIRTIO)         | \
			   (1UL << IRQ_PEND_PFAULT_INIT)    | \
			   (1UL << IRQ_PEND_PFAULT_DONE))

#define IRQ_PEND_IO_MASK ((1UL << IRQ_PEND_IO_ISC_0) | \
			  (1UL << IRQ_PEND_IO_ISC_1) | \
			  (1UL << IRQ_PEND_IO_ISC_2) | \
			  (1UL << IRQ_PEND_IO_ISC_3) | \
			  (1UL << IRQ_PEND_IO_ISC_4) | \
			  (1UL << IRQ_PEND_IO_ISC_5) | \
			  (1UL << IRQ_PEND_IO_ISC_6) | \
			  (1UL << IRQ_PEND_IO_ISC_7))

#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
			    (1UL << IRQ_PEND_MCHK_EX))
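
/*
 * Illustrative sketch, not part of the original header: the pending_irqs
 * bitmaps in the interrupt structures below are indexed by enum irq_types,
 * so the class masks above answer questions such as "is any I/O interrupt
 * pending?". The helper name is hypothetical.
 */
static inline int pending_io_irqs(unsigned long pending_irqs)
{
	return (pending_irqs & IRQ_PEND_IO_MASK) != 0;
}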

struct kvm_s390_interrupt_info {
	struct list_head list;
	u64	type;
	union {
		struct kvm_s390_io_info io;
		struct kvm_s390_ext_info ext;
		struct kvm_s390_pgm_info pgm;
		struct kvm_s390_emerg_info emerg;
		struct kvm_s390_extcall_info extcall;
		struct kvm_s390_prefix_info prefix;
		struct kvm_s390_stop_info stop;
		struct kvm_s390_mchk_info mchk;
	};
};

struct kvm_s390_irq_payload {
	struct kvm_s390_io_info io;
	struct kvm_s390_ext_info ext;
	struct kvm_s390_pgm_info pgm;
	struct kvm_s390_emerg_info emerg;
	struct kvm_s390_extcall_info extcall;
	struct kvm_s390_prefix_info prefix;
	struct kvm_s390_stop_info stop;
	struct kvm_s390_mchk_info mchk;
};

struct kvm_s390_local_interrupt {
	spinlock_t lock;
	struct kvm_s390_float_interrupt *float_int;
	struct swait_queue_head *wq;
	atomic_t *cpuflags;
	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
	struct kvm_s390_irq_payload irq;
	unsigned long pending_irqs;
};

#define FIRQ_LIST_IO_ISC_0 0
#define FIRQ_LIST_IO_ISC_1 1
#define FIRQ_LIST_IO_ISC_2 2
#define FIRQ_LIST_IO_ISC_3 3
#define FIRQ_LIST_IO_ISC_4 4
#define FIRQ_LIST_IO_ISC_5 5
#define FIRQ_LIST_IO_ISC_6 6
#define FIRQ_LIST_IO_ISC_7 7
#define FIRQ_LIST_PFAULT   8
#define FIRQ_LIST_VIRTIO   9
#define FIRQ_LIST_COUNT   10
#define FIRQ_CNTR_IO       0
#define FIRQ_CNTR_SERVICE  1
#define FIRQ_CNTR_VIRTIO   2
#define FIRQ_CNTR_PFAULT   3
#define FIRQ_MAX_COUNT     4

struct kvm_s390_float_interrupt {
	unsigned long pending_irqs;
	spinlock_t lock;
	struct list_head lists[FIRQ_LIST_COUNT];
	int counters[FIRQ_MAX_COUNT];
	struct kvm_s390_mchk_info mchk;
	struct kvm_s390_ext_info srv_signal;
	int next_rr_cpu;
	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
};

struct kvm_hw_wp_info_arch {
	unsigned long addr;
	unsigned long phys_addr;
	int len;
	char *old_data;
};

struct kvm_hw_bp_info_arch {
	unsigned long addr;
	int len;
};

/*
 * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
 * Further KVM_GUESTDBG flags which can be used from userspace can be found in
 * arch/s390/include/uapi/asm/kvm.h
 */
#define KVM_GUESTDBG_EXIT_PENDING 0x10000000

#define guestdbg_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
#define guestdbg_sstep_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
#define guestdbg_hw_bp_enabled(vcpu) \
		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))

struct kvm_guestdbg_info_arch {
	unsigned long cr0;
	unsigned long cr9;
	unsigned long cr10;
	unsigned long cr11;
	struct kvm_hw_bp_info_arch *hw_bp_info;
	struct kvm_hw_wp_info_arch *hw_wp_info;
	int nr_hw_bp;
	int nr_hw_wp;
	unsigned long last_bp;
};

struct kvm_vcpu_arch {
	struct kvm_s390_sie_block *sie_block;
	/* if vsie is active, currently executed shadow sie control block */
	struct kvm_s390_sie_block *vsie_block;
	unsigned int      host_acrs[NUM_ACRS];
	struct fpu	  host_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer    ckc_timer;
	struct kvm_s390_pgm_info pgm;
	struct gmap *gmap;
	/* backup location for the currently enabled gmap when scheduled out */
	struct gmap *enabled_gmap;
	struct kvm_guestdbg_info_arch guestdbg;
	unsigned long pfault_token;
	unsigned long pfault_select;
	unsigned long pfault_compare;
	bool cputm_enabled;
	/*
	 * The seqcount protects updates to cputm_start and sie_block.cputm;
	 * this way we can have non-blocking reads with consistent values.
	 * Only the owning VCPU thread (vcpu->cpu) is allowed to change these
	 * values and to start/stop/enable/disable cpu timer accounting.
	 */
	seqcount_t cputm_seqcount;
	__u64 cputm_start;
};
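
/*
 * Illustrative sketch, not part of the original header: a lockless read of
 * the CPU timer bookkeeping guarded by cputm_seqcount, using the standard
 * seqcount reader pattern from <linux/seqlock.h>. Field names match
 * kvm_vcpu_arch above; the helper itself is hypothetical.
 */
static inline __u64 cputm_start_read(struct kvm_vcpu_arch *arch)
{
	unsigned int seq;
	__u64 start;

	do {
		seq = read_seqcount_begin(&arch->cputm_seqcount);
		start = arch->cputm_start;
	} while (read_seqcount_retry(&arch->cputm_seqcount, seq));

	return start;
}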

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_arch_memory_slot {
};

struct s390_map_info {
	struct list_head list;
	__u64 guest_addr;
	__u64 addr;
	struct page *page;
};

struct s390_io_adapter {
	unsigned int id;
	int isc;
	bool maskable;
	bool masked;
	bool swap;
	struct rw_semaphore maps_lock;
	struct list_head maps;
	atomic_t nr_maps;
};

#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
#define MAX_S390_ADAPTER_MAPS 256

/* maximum size of facilities and facility mask is 2k bytes */
#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11)
#define S390_ARCH_FAC_LIST_SIZE_U64 \
	(S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64))
#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE
#define S390_ARCH_FAC_MASK_SIZE_U64 \
	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))

struct kvm_s390_cpu_model {
	/* facility mask supported by kvm & hosting machine */
	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
	/* facility list requested by guest (in dma page) */
	__u64 *fac_list;
	u64 cpuid;
	unsigned short ibc;
};

struct kvm_s390_crypto {
	struct kvm_s390_crypto_cb *crycb;
	__u32 crycbd;
	__u8 aes_kw;
	__u8 dea_kw;
};

struct kvm_s390_crypto_cb {
	__u8    reserved00[72];                 /* 0x0000 */
	__u8    dea_wrapping_key_mask[24];      /* 0x0048 */
	__u8    aes_wrapping_key_mask[32];      /* 0x0060 */
	__u8    reserved80[128];                /* 0x0080 */
};

/*
 * sie_page2 has to be allocated as DMA because fac_list and crycb need
 * 31bit addresses in the sie control block.
 */
struct sie_page2 {
	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
	u8 reserved900[0x1000 - 0x900];			/* 0x0900 */
} __packed;
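
/*
 * Illustrative sketch, not part of the original header: because sie_page2
 * is allocated below 2 GB, the crycb address fits the 31-bit crycb
 * designation; a typical setup stores that address in crycbd together with
 * the CRYCB_FORMAT bits. The helper name is hypothetical.
 */
static inline __u32 make_crycbd(struct kvm_s390_crypto_cb *crycb)
{
	/* 31-bit address of the crypto control block plus format identifier */
	return (__u32)(unsigned long)crycb | CRYCB_FORMAT1;
}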

struct kvm_s390_vsie {
	struct mutex mutex;
	struct radix_tree_root addr_to_page;
	int page_count;
	int next;
	struct page *pages[KVM_MAX_VCPUS];
};

struct kvm_arch {
	void *sca;
	int use_esca;
	rwlock_t sca_lock;
	debug_info_t *dbf;
	struct kvm_s390_float_interrupt float_int;
	struct kvm_device *flic;
	struct gmap *gmap;
	unsigned long mem_limit;
	int css_support;
	int use_irqchip;
	int use_cmma;
	int user_cpu_state_ctrl;
	int user_sigp;
	int user_stsi;
	int user_instr0;
	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
	wait_queue_head_t ipte_wq;
	int ipte_lock_count;
	struct mutex ipte_mutex;
	struct ratelimit_state sthyi_limit;
	spinlock_t start_stop_lock;
	struct sie_page2 *sie_page2;
	struct kvm_s390_cpu_model model;
	struct kvm_s390_crypto crypto;
	struct kvm_s390_vsie vsie;
	u64 epoch;
	/* subset of available cpu features enabled by user space */
	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
};

#define KVM_HVA_ERR_BAD		(-1UL)
#define KVM_HVA_ERR_RO_BAD	(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

#define ASYNC_PF_PER_VCPU	64
struct kvm_arch_async_pf {
	unsigned long pfault_token;
};

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);

extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);

#endif