/*
 *  vcpu.h: vcpu routines
 *  	Copyright (c) 2005, Intel Corporation.
 *  	Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  	Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *
 * 	Copyright (c) 2007, Intel Corporation.
 *  	Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *	Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */


#ifndef __KVM_VCPU_H__
#define __KVM_VCPU_H__

#include <asm/types.h>
#include <asm/fpu.h>
#include <asm/processor.h>

#ifndef __ASSEMBLY__
#include "vti.h"

#include <linux/kvm_host.h>
#include <linux/spinlock.h>

typedef unsigned long IA64_INST;

typedef union U_IA64_BUNDLE {
	unsigned long i64[2];
	struct { unsigned long template:5, slot0:41, slot1a:18,
		slot1b:23, slot2:41; };
	/* NOTE: following doesn't work because bitfields can't cross natural
	   size boundaries
	   struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
} IA64_BUNDLE;
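/*
 * Illustrative sketch (not part of the original source): because a C
 * bitfield cannot span the 64-bit boundary, the 41-bit slot1 instruction
 * must be reassembled from its two halves, e.g.:
 *
 *	u64 slot1 = bundle.slot1a | ((u64)bundle.slot1b << 18);
 *
 * (18 + 23 = 41 bits; MASK_41 below keeps values to 41 bits.)
 */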

typedef union U_INST64_A5 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
		imm9d:9, s:1, major:4; };
} INST64_A5;

typedef union U_INST64_B4 {
	IA64_INST inst;
	struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
		wh:2, d:1, un1:1, major:4; };
} INST64_B4;

typedef union U_INST64_B8 {
	IA64_INST inst;
	struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
} INST64_B8;

typedef union U_INST64_B9 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
} INST64_B9;

typedef union U_INST64_I19 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
} INST64_I19;

typedef union U_INST64_I26 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_I26;

typedef union U_INST64_I27 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
} INST64_I27;

typedef union U_INST64_I28 { /* not privileged (mov from AR) */
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_I28;

typedef union U_INST64_M28 {
	IA64_INST inst;
	struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M28;

typedef union U_INST64_M29 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_M29;

typedef union U_INST64_M30 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
		x3:3, s:1, major:4; };
} INST64_M30;

typedef union U_INST64_M31 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_M31;

typedef union U_INST64_M32 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
} INST64_M32;

typedef union U_INST64_M33 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
} INST64_M33;

typedef union U_INST64_M35 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
} INST64_M35;

typedef union U_INST64_M36 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
} INST64_M36;

typedef union U_INST64_M37 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
		i:1, major:4; };
} INST64_M37;

typedef union U_INST64_M41 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
} INST64_M41;

typedef union U_INST64_M42 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M42;

typedef union U_INST64_M43 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M43;

typedef union U_INST64_M44 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
} INST64_M44;

typedef union U_INST64_M45 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M45;

typedef union U_INST64_M46 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
		x3:3, un1:1, major:4; };
} INST64_M46;

typedef union U_INST64_M47 {
	IA64_INST inst;
	struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
} INST64_M47;

typedef union U_INST64_M1 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M1;

typedef union U_INST64_M2 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M2;

typedef union U_INST64_M3 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
		x6:6, s:1, major:4; };
} INST64_M3;

typedef union U_INST64_M4 {
	IA64_INST inst;
	struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M4;

typedef union U_INST64_M5 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
		x6:6, s:1, major:4; };
} INST64_M5;

typedef union U_INST64_M6 {
	IA64_INST inst;
	struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M6;

typedef union U_INST64_M9 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M9;

typedef union U_INST64_M10 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
		x6:6, s:1, major:4; };
} INST64_M10;

typedef union U_INST64_M12 {
	IA64_INST inst;
	struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M12;

typedef union U_INST64_M15 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
		x6:6, s:1, major:4; };
} INST64_M15;

typedef union U_INST64 {
	IA64_INST inst;
	struct { unsigned long :37, major:4; } generic;
	INST64_A5 A5;	/* used in build_hypercall_bundle only */
	INST64_B4 B4;	/* used in build_hypercall_bundle only */
	INST64_B8 B8;	/* rfi, bsw.[01] */
	INST64_B9 B9;	/* break.b */
	INST64_I19 I19;	/* used in build_hypercall_bundle only */
	INST64_I26 I26;	/* mov register to ar (I unit) */
	INST64_I27 I27;	/* mov immediate to ar (I unit) */
	INST64_I28 I28;	/* mov from ar (I unit) */
	INST64_M1  M1;	/* ld integer */
	INST64_M2  M2;
	INST64_M3  M3;
	INST64_M4  M4;	/* st integer */
	INST64_M5  M5;
	INST64_M6  M6;	/* ldfd floating point			*/
	INST64_M9  M9;	/* stfd floating point			*/
	INST64_M10 M10;	/* stfd floating point			*/
	INST64_M12 M12;	/* ldfd pair floating point		*/
	INST64_M15 M15;	/* lfetch + imm update			*/
	INST64_M28 M28;	/* purge translation cache entry	*/
	INST64_M29 M29;	/* mov register to ar (M unit)		*/
	INST64_M30 M30;	/* mov immediate to ar (M unit)		*/
	INST64_M31 M31;	/* mov from ar (M unit)			*/
	INST64_M32 M32;	/* mov reg to cr			*/
	INST64_M33 M33;	/* mov from cr				*/
	INST64_M35 M35;	/* mov to psr				*/
	INST64_M36 M36;	/* mov from psr				*/
	INST64_M37 M37;	/* break.m				*/
	INST64_M41 M41;	/* translation cache insert		*/
	INST64_M42 M42;	/* mov to indirect reg/translation reg insert*/
	INST64_M43 M43;	/* mov from indirect reg		*/
	INST64_M44 M44;	/* set/reset system mask		*/
	INST64_M45 M45;	/* translation purge			*/
	INST64_M46 M46;	/* translation access (tpa,tak)		*/
	INST64_M47 M47;	/* purge translation entry		*/
} INST64;

#define MASK_41 ((unsigned long)0x1ffffffffff)

/* Virtual address memory attributes encoding */
#define VA_MATTR_WB         0x0
#define VA_MATTR_UC         0x4
#define VA_MATTR_UCE        0x5
#define VA_MATTR_WC         0x6
#define VA_MATTR_NATPAGE    0x7

#define PMASK(size)         (~((size) - 1))
#define PSIZE(size)         (1UL<<(size))
#define CLEARLSB(ppn, nbits)    (((ppn) >> (nbits)) << (nbits))
#define PAGEALIGN(va, ps)	CLEARLSB(va, ps)
#define PAGE_FLAGS_RV_MASK   (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53))
#define _PAGE_MA_ST     (0x1 <<  2) /* memory attribute reserved for software use */
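/*
 * Worked example (illustrative): for a 16KB page, ps == 14, so
 * PSIZE(14) == 0x4000, PMASK(PSIZE(14)) == ~0x3fffUL, and
 * PAGEALIGN(0x80001234UL, 14) == 0x80000000UL.
 */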

#define ARCH_PAGE_SHIFT   12

#define INVALID_TI_TAG (1UL << 63)

#define VTLB_PTE_P_BIT      0
#define VTLB_PTE_IO_BIT     60
#define VTLB_PTE_IO         (1UL<<VTLB_PTE_IO_BIT)
#define VTLB_PTE_P          (1UL<<VTLB_PTE_P_BIT)

#define vcpu_quick_region_check(_tr_regions,_ifa)		\
	(_tr_regions & (1 << ((unsigned long)_ifa >> 61)))

#define vcpu_quick_region_set(_tr_regions,_ifa)             \
	do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
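/*
 * Usage sketch (illustrative; the bitmap variable name is arbitrary):
 * inserting a TR covering an address in region 5 sets bit 5 of the
 * bitmap, so a later lookup can skip the TR search entirely when no TR
 * covers the faulting region:
 *
 *	vcpu_quick_region_set(tr_regions, va);
 *	...
 *	if (!vcpu_quick_region_check(tr_regions, ifa))
 *		goto no_tr_match;
 */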

static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
		u64 va, u64 rid)
{
	trp->page_flags = pte;
	trp->itir = itir;
	trp->vadr = va;
	trp->rid = rid;
}

extern u64 kvm_get_mpt_entry(u64 gpfn);

/*
 * Return the I/O type encoded in the mpt entry for gpfn, or 0 when gpfn
 * maps ordinary memory, is invalid, or is physical MMIO.
 */
static inline u64 __gpfn_is_io(u64 gpfn)
{
	u64  pte;
	pte = kvm_get_mpt_entry(gpfn);
	if (!(pte & GPFN_INV_MASK)) {
		pte = pte & GPFN_IO_MASK;
		if (pte != GPFN_PHYS_MMIO)
			return pte;
	}
	return 0;
}
#endif
#define IA64_NO_FAULT	0
#define IA64_FAULT	1

#define VMM_RBS_OFFSET  ((VMM_TASK_SIZE + 15) & ~15)

#define SW_BAD  0   /* Bad mode transition */
#define SW_V2P  1   /* Physical mode emulation is activated */
#define SW_P2V  2   /* Exit physical mode emulation */
#define SW_SELF 3   /* No mode transition */
#define SW_NOP  4   /* Mode transition, but no action required */

#define GUEST_IN_PHY    0x1
#define GUEST_PHY_EMUL  0x2

#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))

#define VRN_SHIFT	61
#define VRN_MASK	0xe000000000000000
#define VRN0		0x0UL
#define VRN1		0x1UL
#define VRN2		0x2UL
#define VRN3		0x3UL
#define VRN4		0x4UL
#define VRN5		0x5UL
#define VRN6		0x6UL
#define VRN7		0x7UL

#define IRQ_NO_MASKED         0
#define IRQ_MASKED_BY_VTPR    1
#define IRQ_MASKED_BY_INSVC   2   /* masked by inservice IRQ */

#define PTA_BASE_SHIFT      15

#define IA64_PSR_VM_BIT     46
#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)

/* Interruption Function State */
#define IA64_IFS_V_BIT      63
#define IA64_IFS_V  (__IA64_UL(1) << IA64_IFS_V_BIT)

#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)

#ifndef __ASSEMBLY__

#include <asm/gcc_intrin.h>

#define is_physical_mode(v)		\
	((v->arch.mode_flags) & GUEST_IN_PHY)

#define is_virtual_mode(v)	\
	(!is_physical_mode(v))

#define MODE_IND(psr)	\
	(((psr).it << 2) + ((psr).dt << 1) + (psr).rt)

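/*
 * Illustration: MODE_IND maps (psr.it, psr.dt, psr.rt) to an index 0-7,
 * suitable for picking one of the SW_* transition actions above; all
 * three bits set (fully virtual) yields 7, all clear (fully physical)
 * yields 0.
 */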
#ifndef CONFIG_SMP
#define _vmm_raw_spin_lock(x)	do { } while (0)
#define _vmm_raw_spin_unlock(x)	do { } while (0)
#else
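/*
 * Test-and-test-and-set spin: try to take the lock with a
 * cmpxchg-acquire (0 -> 1); on contention, spin reading the lock word
 * until it clears, then retry the cmpxchg.
 */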
#define _vmm_raw_spin_lock(x)						\
	do {								\
		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
		__u64 ia64_spinlock_val;				\
		ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
		if (unlikely(ia64_spinlock_val)) {			\
			do {						\
				while (*ia64_spinlock_ptr)	\
					ia64_barrier();		\
				ia64_spinlock_val =			\
				ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
			} while (ia64_spinlock_val);			\
		}							\
	} while (0)

#define _vmm_raw_spin_unlock(x)				\
	do {						\
		barrier();				\
		((spinlock_t *)(x))->raw_lock.lock = 0;	\
	} while (0)
#endif

void vmm_spin_lock(spinlock_t *lock);
void vmm_spin_unlock(spinlock_t *lock);
enum {
	I_TLB = 1,
	D_TLB = 2
};

union kvm_va {
	struct {
		unsigned long off : 60;		/* intra-region offset */
		unsigned long reg :  4;		/* region number */
	} f;
	unsigned long l;
	void *p;
};

#define __kvm_pa(x)     ({union kvm_va _v; _v.l = (long) (x);		\
						_v.f.reg = 0; _v.l; })
#define __kvm_va(x)     ({union kvm_va _v; _v.l = (long) (x);		\
				_v.f.reg = -1; _v.p; })
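/*
 * Worked example (illustrative): __kvm_pa() clears the four region bits
 * and __kvm_va() sets them all, so
 *
 *	__kvm_pa(0xf000000000012345UL) == 0x0000000000012345UL
 *	__kvm_va(0x0000000000012345UL) == (void *)0xf000000000012345UL
 */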

#define _REGION_ID(x)           ({union ia64_rr _v; _v.val = (long)(x); \
						_v.rid; })
#define _REGION_PAGE_SIZE(x)    ({union ia64_rr _v; _v.val = (long)(x); \
						_v.ps; })
#define _REGION_HW_WALKER(x)    ({union ia64_rr _v; _v.val = (long)(x);	\
						_v.ve; })

enum vhpt_ref { DATA_REF, NA_REF, INST_REF, RSE_REF };
enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };

#define VCPU(_v, _x) ((_v)->arch.vpd->_x)
#define VMX(_v, _x)  ((_v)->arch._x)

#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
#define VLSAPIC_XTP(_v)        VMX(_v, xtp)

static inline unsigned long itir_ps(unsigned long itir)
{
	return ((itir >> 2) & 0x3f);
}
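/*
 * Example (illustrative): the page-size field occupies itir bits 2-7,
 * so itir_ps(0x38) == 0xe, i.e. a 16KB page.
 */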


/**************************************************************************
  VCPU control register access routines
 **************************************************************************/

static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, itir));
}

static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itir) = val;
}

static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, ifa));
}

static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, ifa) = val;
}

static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, iva));
}

static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, pta));
}

static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, lid));
}

static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, tpr));
}

static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
{
	return (0UL);		/* reads of EOI always return 0 */
}

static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, irr[0]));
}

static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, irr[1]));
}

static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, irr[2]));
}

static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, irr[3]));
}

static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
{
	ia64_setreg(_IA64_REG_CR_DCR, val);
}

static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, isr) = val;
}

static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, lid) = val;
}

static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, ipsr) = val;
}

static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, iip) = val;
}

static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, ifs) = val;
}

static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, iipa) = val;
}

static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, iha) = val;
}


static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
{
	return vcpu->arch.vrr[reg>>61];
}

/**************************************************************************
  VCPU debug breakpoint register access routines
 **************************************************************************/

static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
{
	__ia64_set_dbr(reg, val);
}

static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
{
	ia64_set_ibr(reg, val);
}

static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
{
	return ((u64)__ia64_get_dbr(reg));
}

static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
{
	return ((u64)ia64_get_ibr(reg));
}

/**************************************************************************
  VCPU performance monitor register access routines
 **************************************************************************/
static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
{
	/* NOTE: Writes to unimplemented PMC registers are discarded */
	ia64_set_pmc(reg, val);
}

static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
{
	/* NOTE: Writes to unimplemented PMD registers are discarded */
	ia64_set_pmd(reg, val);
}

static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
{
	/* NOTE: Reads from unimplemented PMC registers return zero */
	return ((u64)ia64_get_pmc(reg));
}

static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
{
	/* NOTE: Reads from unimplemented PMD registers return zero */
	return ((u64)ia64_get_pmd(reg));
}

static inline unsigned long vrrtomrr(unsigned long val)
{
	union ia64_rr rr;
	rr.val = val;
	rr.rid = (rr.rid << 4) | 0xe;
	if (rr.ps > PAGE_SHIFT)
		rr.ps = PAGE_SHIFT;
	rr.ve = 1;
	return rr.val;
}
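/*
 * Example (illustrative): a guest rr with rid 0x123 becomes a machine rr
 * with rid 0x123e (rid << 4 | 0xe), ps clamped to the host PAGE_SHIFT,
 * and the VHPT walker enabled (ve = 1).
 */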


static inline int highest_bits(int *dat)
{
	u32  bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}
	return NULL_VECTOR;
}
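/*
 * Example (illustrative): with only bit 8 of dat[0] set (dat[0] == 0x100),
 * fls() returns 9, so highest_bits() returns vector 0 * 32 + 9 - 1 == 8.
 */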

/*
 * Return true when the pending irq takes priority over the in-service one.
 */
static inline int is_higher_irq(int pending, int inservice)
{
	return ((pending > inservice)
			|| ((pending != NULL_VECTOR)
				&& (inservice == NULL_VECTOR)));
}

static inline int is_higher_class(int pending, int mic)
{
	return ((pending >> 4) > mic);
}
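/*
 * Example (illustrative): vectors fall into 16 priority classes keyed by
 * their top four bits, so pending vector 0x59 (class 5) beats a TPR.mic
 * of 4 but not 5.
 */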

/*
 * Return 0-255 for the highest pending irq,
 * or NULL_VECTOR when none is pending.
 */
static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
{
	if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR))
		return NMI_VECTOR;
	if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return highest_bits((int *)&VCPU(vcpu, irr[0]));
}

static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
{
	if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR))
		return NMI_VECTOR;
	if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return highest_bits((int *)&(VMX(vcpu, insvc[0])));
}

extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
					struct ia64_fpreg *val);
extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
					struct ia64_fpreg *val);
extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
					u64 itir, u64 va, int type);
extern struct thash_data *vhpt_lookup(u64 va);
extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
		u64 itir, u64 ifa, int type);
extern void thash_purge_all(struct kvm_vcpu *v);
extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
						u64 va, int is_data);
extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
						u64 ps, int is_data);

extern void vcpu_increment_iip(struct kvm_vcpu *v);
extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
extern void nested_dtlb(struct kvm_vcpu *vcpu);
extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);

extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);

extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
extern void vmm_transition(struct kvm_vcpu *vcpu);
extern void vmm_trampoline(union context *from, union context *to);
extern int vmm_entry(void);
extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu);

extern void vmm_reset_entry(void);
void kvm_init_vtlb(struct kvm_vcpu *v);
void kvm_init_vhpt(struct kvm_vcpu *v);
void thash_init(struct thash_cb *hcb, u64 sz);

void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);

extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
		u64 arg4, u64 arg5, u64 arg6, u64 arg7);

extern long vmm_sanity;

#endif
#endif	/* __KVM_VCPU_H__ */