Commit 8fe07367 authored by Xiantao Zhang, committed by Avi Kivity

KVM: ia64: Clean up vmm_ivt.S using tab to indent every line

Use tabs for indentation throughout vmm_ivt.S.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent 9f7d5bb5
/*
 * arch/ia64/kvm/vmm_ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
@@ -71,37 +71,37 @@
#endif
#define KVM_FAULT(n)	\
	kvm_fault_##n:;	\
	mov r19=n;;	\
	br.sptk.many kvm_vmm_panic;	\
	;;	\
#define KVM_REFLECT(n)	\
	mov r31=pr;	\
	mov r19=n;	/* prepare to save predicates */ \
	mov r29=cr.ipsr;	\
	;;	\
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;	\
	(p7) br.sptk.many kvm_dispatch_reflection;	\
	br.sptk.many kvm_vmm_panic;	\
GLOBAL_ENTRY(kvm_vmm_panic)
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r15
	adds r3=8,r2	// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i	// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	br.call.sptk.many b6=vmm_panic_handler;
END(kvm_vmm_panic)
	.section .text.ivt,"ax"
@@ -112,308 +112,307 @@ kvm_ia64_ivt:
///////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(kvm_vhpt_miss)
	KVM_FAULT(0)
END(kvm_vhpt_miss)
	.org kvm_ia64_ivt+0x400
////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(kvm_itlb_miss)
	mov r31 = pr
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
	(p6) br.sptk kvm_alt_itlb_miss
	mov r19 = 1
	br.sptk kvm_itlb_miss_dispatch
	KVM_FAULT(1);
END(kvm_itlb_miss)
	.org kvm_ia64_ivt+0x0800
//////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(kvm_dtlb_miss)
	mov r31 = pr
	mov r29=cr.ipsr;
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
	(p6) br.sptk kvm_alt_dtlb_miss
	br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)
	.org kvm_ia64_ivt+0x0c00
////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(kvm_alt_itlb_miss)
	mov r16=cr.ifa	// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	mov r24=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
	and r19=r19,r16	// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r17,r19	// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r20
	;;
	itc.i r19	// insert the TLB entry
	mov pr=r31,-1
	rfi
END(kvm_alt_itlb_miss)
	.org kvm_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(kvm_alt_dtlb_miss)
	mov r16=cr.ifa	// get address that caused the TLB miss
	;;
	movl r17=PAGE_KERNEL
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r24=cr.ipsr
	;;
	and r19=r19,r16	// clear ed, reserved bits, and PTE control bits
	;;
	or r19=r19,r17	// insert PTE control bits into r19
	;;
	movl r20=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r20
	;;
	itc.d r19	// insert the TLB entry
	mov pr=r31,-1
	rfi
END(kvm_alt_dtlb_miss)
	.org kvm_ia64_ivt+0x1400
//////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(kvm_nested_dtlb_miss)
	KVM_FAULT(5)
END(kvm_nested_dtlb_miss)
	.org kvm_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(kvm_ikey_miss)
	KVM_REFLECT(6)
END(kvm_ikey_miss)
	.org kvm_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(kvm_dkey_miss)
	KVM_REFLECT(7)
END(kvm_dkey_miss)
	.org kvm_ia64_ivt+0x2000
////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(kvm_dirty_bit)
	KVM_REFLECT(8)
END(kvm_dirty_bit)
	.org kvm_ia64_ivt+0x2400
////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(kvm_iaccess_bit)
	KVM_REFLECT(9)
END(kvm_iaccess_bit)
	.org kvm_ia64_ivt+0x2800
///////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(kvm_daccess_bit)
	KVM_REFLECT(10)
END(kvm_daccess_bit)
	.org kvm_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(kvm_break_fault)
	mov r31=pr
	mov r19=11
	mov r29=cr.ipsr
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!)
	mov out0=cr.ifa
	mov out2=cr.isr	// FIXME: pity to make this slow access twice
	mov out3=cr.iim	// FIXME: pity to make this slow access twice
	adds r3=8,r2	// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i	// guarantee that interruption collection is on
	;;
	//(p15)ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out1=16,sp
	br.call.sptk.many b6=kvm_ia64_handle_break
	;;
END(kvm_break_fault)
	.org kvm_ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(kvm_interrupt)
	mov r31=pr	// prepare to save predicates
	mov r19=12
	mov r29=cr.ipsr
	;;
	tbit.z p6,p7=r29,IA64_PSR_VM_BIT
	tbit.z p0,p15=r29,IA64_PSR_I_BIT
	;;
	(p7) br.sptk kvm_dispatch_interrupt
	;;
	mov r27=ar.rsc	/* M */
	mov r20=r1	/* A */
	mov r25=ar.unat	/* M */
	mov r26=ar.pfs	/* I */
	mov r28=cr.iip	/* M */
	cover	/* B (or nothing) */
	;;
	mov r1=sp
	;;
	invala	/* M */
	mov r30=cr.ifs
	;;
	addl r1=-VMM_PT_REGS_SIZE,r1
	;;
	adds r17=2*L1_CACHE_BYTES,r1	/* really: biggest cache-line size */
	adds r16=PT(CR_IPSR),r1
	;;
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
	st8 [r16]=r29	/* save cr.ipsr */
	;;
	lfetch.fault.excl.nt1 [r17]
	mov r29=b0
	;;
	adds r16=PT(R8),r1	/* initialize first base pointer */
	adds r17=PT(R9),r1	/* initialize second base pointer */
	mov r18=r0	/* make sure r18 isn't NaT */
	;;
.mem.offset 0,0; st8.spill [r16]=r8,16
.mem.offset 8,0; st8.spill [r17]=r9,16
	;;
.mem.offset 0,0; st8.spill [r16]=r10,24
.mem.offset 8,0; st8.spill [r17]=r11,24
	;;
	st8 [r16]=r28,16	/* save cr.iip */
	st8 [r17]=r30,16	/* save cr.ifs */
	mov r8=ar.fpsr	/* M */
	mov r9=ar.csd
	mov r10=ar.ssd
	movl r11=FPSR_DEFAULT	/* L-unit */
	;;
	st8 [r16]=r25,16	/* save ar.unat */
	st8 [r17]=r26,16	/* save ar.pfs */
	shl r18=r18,16	/* compute ar.rsc to be used for "loadrs" */
	;;
	st8 [r16]=r27,16	/* save ar.rsc */
	adds r17=16,r17	/* skip over ar_rnat field */
	;;
	st8 [r17]=r31,16	/* save predicates */
	adds r16=16,r16	/* skip over ar_bspstore field */
	;;
	st8 [r16]=r29,16	/* save b0 */
	st8 [r17]=r18,16	/* save ar.rsc value for "loadrs" */
	;;
.mem.offset 0,0; st8.spill [r16]=r20,16	/* save original r1 */
.mem.offset 8,0; st8.spill [r17]=r12,16
	adds r12=-16,r1
	/* switch to kernel memory stack (with 16 bytes of scratch) */
	;;
.mem.offset 0,0; st8.spill [r16]=r13,16
.mem.offset 8,0; st8.spill [r17]=r8,16	/* save ar.fpsr */
	;;
.mem.offset 0,0; st8.spill [r16]=r15,16
.mem.offset 8,0; st8.spill [r17]=r14,16
	dep r14=-1,r0,60,4
	;;
.mem.offset 0,0; st8.spill [r16]=r2,16
.mem.offset 8,0; st8.spill [r17]=r3,16
	adds r2=VMM_PT_REGS_R16_OFFSET,r1
	adds r14 = VMM_VCPU_GP_OFFSET,r13
	;;
	mov r8=ar.ccv
	ld8 r14 = [r14]
	;;
	mov r1=r14	/* establish kernel global pointer */
	;; \
	bsw.1
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	mov out0=r13
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	//(p15) ssm psr.i
	adds r3=8,r2	// set up second base pointer for SAVE_REST
	srlz.i	// ensure everybody knows psr.ic is back on
	;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
	;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
	;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
	mov r18=b6
	;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
	mov r19=b7
	;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
	;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
	;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
	;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,32
	;;
	mov ar.fpsr=r11	/* M-unit */
	st8 [r2]=r8,8	/* ar.ccv */
	adds r24=PT(B6)-PT(F7),r3
	;;
	stf.spill [r2]=f6,32
	stf.spill [r3]=f7,32
	;;
	stf.spill [r2]=f8,32
	stf.spill [r3]=f9,32
	;;
	stf.spill [r2]=f10
	stf.spill [r3]=f11
	adds r25=PT(B7)-PT(F11),r3
	;;
	st8 [r24]=r18,16	/* b6 */
	st8 [r25]=r19,16	/* b7 */
	;;
	st8 [r24]=r9	/* ar.csd */
	st8 [r25]=r10	/* ar.ssd */
	;;
	srlz.d	// make sure we see the effect of cr.ivr
	addl r14=@gprel(ia64_leave_nested),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_ia64_handle_irq
	;;
END(kvm_interrupt)
	.global kvm_dispatch_vexirq
@@ -421,387 +420,385 @@ END(kvm_interrupt)
//////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
ENTRY(kvm_virtual_exirq)
	mov r31=pr
	mov r19=13
	mov r30 =r0
	;;
kvm_dispatch_vexirq:
	cmp.eq p6,p0 = 1,r30
	;;
	(p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	(p6) ld8 r1 = [r29]
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,1,0
	mov out0=r13
	ssm psr.ic
	;;
	srlz.i	// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	adds r3=8,r2	// set up second base pointer
	;;
	KVM_SAVE_REST
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	mov rp=r14
	br.call.sptk.many b6=kvm_vexirq
END(kvm_virtual_exirq)
	.org kvm_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	KVM_FAULT(14)
	// this code segment is from 2.6.16.13
	.org kvm_ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	KVM_FAULT(15)
	.org kvm_ia64_ivt+0x4000
///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	KVM_FAULT(16)
	.org kvm_ia64_ivt+0x4400
//////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	KVM_FAULT(17)
	.org kvm_ia64_ivt+0x4800
//////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	KVM_FAULT(18)
	.org kvm_ia64_ivt+0x4c00
//////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	KVM_FAULT(19)
	.org kvm_ia64_ivt+0x5000
//////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
ENTRY(kvm_page_not_present)
	KVM_REFLECT(20)
END(kvm_page_not_present)
	.org kvm_ia64_ivt+0x5100
///////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(kvm_key_permission)
	KVM_REFLECT(21)
END(kvm_key_permission)
	.org kvm_ia64_ivt+0x5200
//////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(kvm_iaccess_rights)
	KVM_REFLECT(22)
END(kvm_iaccess_rights)
	.org kvm_ia64_ivt+0x5300
//////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(kvm_daccess_rights)
	KVM_REFLECT(23)
END(kvm_daccess_rights)
	.org kvm_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(kvm_general_exception)
	KVM_REFLECT(24)
	KVM_FAULT(24)
END(kvm_general_exception)
	.org kvm_ia64_ivt+0x5500
//////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(kvm_disabled_fp_reg)
	KVM_REFLECT(25)
END(kvm_disabled_fp_reg)
	.org kvm_ia64_ivt+0x5600
////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(kvm_nat_consumption)
	KVM_REFLECT(26)
END(kvm_nat_consumption)
	.org kvm_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(kvm_speculation_vector)
	KVM_REFLECT(27)
END(kvm_speculation_vector)
	.org kvm_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	KVM_FAULT(28)
	.org kvm_ia64_ivt+0x5900
///////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(kvm_debug_vector)
	KVM_FAULT(29)
END(kvm_debug_vector)
	.org kvm_ia64_ivt+0x5a00
///////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(kvm_unaligned_access)
	KVM_REFLECT(30)
END(kvm_unaligned_access)
	.org kvm_ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(kvm_unsupported_data_reference)
	KVM_REFLECT(31)
END(kvm_unsupported_data_reference)
	.org kvm_ia64_ivt+0x5c00
////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
ENTRY(kvm_floating_point_fault)
	KVM_REFLECT(32)
END(kvm_floating_point_fault)
	.org kvm_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(kvm_floating_point_trap)
	KVM_REFLECT(33)
END(kvm_floating_point_trap)
	.org kvm_ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(kvm_lower_privilege_trap)
	KVM_REFLECT(34)
END(kvm_lower_privilege_trap)
	.org kvm_ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(kvm_taken_branch_trap)
	KVM_REFLECT(35)
END(kvm_taken_branch_trap)
	.org kvm_ia64_ivt+0x6000
////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(kvm_single_step_trap)
	KVM_REFLECT(36)
END(kvm_single_step_trap)
	.global kvm_virtualization_fault_back
	.org kvm_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
ENTRY(kvm_virtualization_fault)
	mov r31=pr
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	st8 [r16] = r1
	adds r17 = VMM_VCPU_GP_OFFSET, r21
	;;
	ld8 r1 = [r17]
	cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
	cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
	cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
	cmp.eq p9,p0=EVENT_RSM,r24
	cmp.eq p10,p0=EVENT_SSM,r24
	cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
	cmp.eq p12,p0=EVENT_THASH,r24
	(p6) br.dptk.many kvm_asm_mov_from_ar
	(p7) br.dptk.many kvm_asm_mov_from_rr
	(p8) br.dptk.many kvm_asm_mov_to_rr
	(p9) br.dptk.many kvm_asm_rsm
	(p10) br.dptk.many kvm_asm_ssm
	(p11) br.dptk.many kvm_asm_mov_to_psr
	(p12) br.dptk.many kvm_asm_thash
	;;
kvm_virtualization_fault_back:
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	ld8 r1 = [r16]
	;;
	mov r19=37
	adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
	adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16] = r24
	st8 [r17] = r25
	;;
	cmp.ne p6,p0=EVENT_RFI, r24
	(p6) br.sptk kvm_dispatch_virtualization_fault
	;;
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]
	;;
	adds r18=VMM_VPD_VIFS_OFFSET,r18
	;;
	ld8 r18=[r18]
	;;
	tbit.z p6,p0=r18,63
	(p6) br.sptk kvm_dispatch_virtualization_fault
	;;
	//if vifs.v=1 desert current register frame
	alloc r18=ar.pfs,0,0,0,0
	br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)
	.org kvm_ia64_ivt+0x6200
//////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	KVM_FAULT(38)
	.org kvm_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	KVM_FAULT(39)
	.org kvm_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	KVM_FAULT(40)
	.org kvm_ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	KVM_FAULT(41)
	.org kvm_ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	KVM_FAULT(42)
	.org kvm_ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	KVM_FAULT(43)
	.org kvm_ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	KVM_FAULT(44)
	.org kvm_ia64_ivt+0x6900
///////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception
//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(kvm_ia32_exception)
	KVM_FAULT(45)
END(kvm_ia32_exception)
	.org kvm_ia64_ivt+0x6a00
////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(kvm_ia32_intercept)
	KVM_FAULT(47)
END(kvm_ia32_intercept)
	.org kvm_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	KVM_FAULT(48)
	.org kvm_ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	KVM_FAULT(49)
	.org kvm_ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	KVM_FAULT(50)
	.org kvm_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	KVM_FAULT(52)
	.org kvm_ia64_ivt+0x7100
////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	KVM_FAULT(53)
	.org kvm_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	KVM_FAULT(54)
	.org kvm_ia64_ivt+0x7300
////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	KVM_FAULT(55)
	.org kvm_ia64_ivt+0x7400
////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	KVM_FAULT(56)
	.org kvm_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	KVM_FAULT(57)
	.org kvm_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	KVM_FAULT(58)
	.org kvm_ia64_ivt+0x7700
////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	KVM_FAULT(59)
	.org kvm_ia64_ivt+0x7800
////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	KVM_FAULT(60)
	.org kvm_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	KVM_FAULT(61)
	.org kvm_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	KVM_FAULT(62)
	.org kvm_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	KVM_FAULT(63)
	.org kvm_ia64_ivt+0x7c00
////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	KVM_FAULT(64)
	.org kvm_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	KVM_FAULT(65)
	.org kvm_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	KVM_FAULT(66)
	.org kvm_ia64_ivt+0x7f00
////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	KVM_FAULT(67)
	.org kvm_ia64_ivt+0x8000
// There is no particular reason for this code to be here, other than that
@@ -811,132 +808,128 @@ END(kvm_ia32_intercept)
ENTRY(kvm_dtlb_miss_dispatch)
	mov r19 = 2
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2	// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i	// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out2=16,r12
	br.call.sptk.many b6=kvm_page_fault
END(kvm_dtlb_miss_dispatch)
ENTRY(kvm_itlb_miss_dispatch)
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=r15
	adds r3=8,r2	// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i	// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12
	br.call.sptk.many b6=kvm_page_fault
END(kvm_itlb_miss_dispatch)
ENTRY(kvm_dispatch_reflection)
/*
 * Input:
 * psr.ic: off
 * r19: intr type (offset into ivt, see ia64_int.h)
 * r31: contains saved predicates (pr)
 */
	KVM_SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=cr.ifa
	mov out1=cr.isr
	mov out2=cr.iim
	mov out3=r15
	adds r3=8,r2	// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i	// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	adds out4=16,r12
	br.call.sptk.many b6=reflect_interruption
END(kvm_dispatch_reflection)
ENTRY(kvm_dispatch_virtualization_fault)
	adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
	adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
	;;
	st8 [r16] = r24
	st8 [r17] = r25
	;;
	KVM_SAVE_MIN_WITH_COVER_R19
	;;
	alloc r14=ar.pfs,0,0,2,0	// (must be first in insn group!)
	mov out0=r13	//vcpu
	adds r3=8,r2	// set up second base pointer
	;;
	ssm psr.ic
	;;
	srlz.i	// guarantee that interruption collection is on
	;;
	//(p15) ssm psr.i	// restore psr.i
	addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
	;;
	KVM_SAVE_REST
	KVM_SAVE_EXTRA
	mov rp=r14
	;;
	adds out1=16,sp	//regs
	br.call.sptk.many b6=kvm_emulate
END(kvm_dispatch_virtualization_fault)
ENTRY(kvm_dispatch_interrupt)
	KVM_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
	adds r3=8,r2	// set up second base pointer for SAVE_REST
	;;
	ssm psr.ic
	;;
	srlz.i
	;;
	//(p15) ssm psr.i
	addl r14=@gprel(ia64_leave_hypervisor),gp
	;;
	KVM_SAVE_REST
	mov rp=r14
	;;
	mov out0=r13	// pass pointer to pt_regs as second arg
	br.call.sptk.many b6=kvm_ia64_handle_irq
END(kvm_dispatch_interrupt)
GLOBAL_ENTRY(ia64_leave_nested)
	rsm psr.i
	;;
@@ -1015,7 +1008,7 @@ GLOBAL_ENTRY(ia64_leave_nested)
	;;
	ldf.fill f11=[r2]
//	mov r18=r13
//	mov r21=r13
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12
	;;
@@ -1065,138 +1058,135 @@ GLOBAL_ENTRY(ia64_leave_nested)
	rfi
END(ia64_leave_nested)
GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
/*
 * work.need_resched etc. mustn't get changed
 *by this CPU before it returns to
 * user- or fsys-mode, hence we disable interrupts early on:
 */
	adds r2 = PT(R4)+16,r12
	adds r3 = PT(R5)+16,r12
	adds r8 = PT(EML_UNAT)+16,r12
	;;
	ld8 r8 = [r8]
	;;
	mov ar.unat=r8
	;;
	ld8.fill r4=[r2],16	//load r4
	ld8.fill r5=[r3],16	//load r5
	;;
	ld8.fill r6=[r2]	//load r6
	ld8.fill r7=[r3]	//load r7
	;;
END(ia64_leave_hypervisor_prepare)
//fall through
GLOBAL_ENTRY(ia64_leave_hypervisor)
	rsm psr.i
	;;
	br.call.sptk.many b0=leave_hypervisor_tail
	;;
	adds r20=PT(PR)+16,r12
	adds r8=PT(EML_UNAT)+16,r12
	;;
	ld8 r8=[r8]
	;;
	mov ar.unat=r8
	;;
	lfetch [r20],PT(CR_IPSR)-PT(PR)
	adds r2 = PT(B6)+16,r12
	adds r3 = PT(B7)+16,r12
	;;
	lfetch [r20]
	;;
	ld8 r24=[r2],16	/* B6 */
	ld8 r25=[r3],16	/* B7 */
	;;
	ld8 r26=[r2],16	/* ar_csd */
	ld8 r27=[r3],16	/* ar_ssd */
	mov b6 = r24
	;;
	ld8.fill r8=[r2],16
	ld8.fill r9=[r3],16
	mov b7 = r25
	;;
	mov ar.csd = r26
	mov ar.ssd = r27
	;;
	ld8.fill r10=[r2],PT(R15)-PT(R10)
	ld8.fill r11=[r3],PT(R14)-PT(R11)
	;;
	ld8.fill r15=[r2],PT(R16)-PT(R15)
	ld8.fill r14=[r3],PT(R17)-PT(R14)
	;;
	ld8.fill r16=[r2],16
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	;;
	ld8.fill r22=[r2],16
	ld8.fill r23=[r3],16
	;;
	ld8.fill r24=[r2],16
	ld8.fill r25=[r3],16
	;;
	ld8.fill r26=[r2],16
	ld8.fill r27=[r3],16
	;;
	ld8.fill r28=[r2],16
	ld8.fill r29=[r3],16
	;;
	ld8.fill r30=[r2],PT(F6)-PT(R30)
	ld8.fill r31=[r3],PT(F7)-PT(R31)
	;;
	rsm psr.i | psr.ic
	// initiate turning off of interrupt and interruption collection
	invala	// invalidate ALAT
	;;
	srlz.i	// ensure interruption collection is off
	;;
	bsw.0
	;;
	adds r16 = PT(CR_IPSR)+16,r12
	adds r17 = PT(CR_IIP)+16,r12
	mov r21=r13	// get current
	;;
	ld8 r31=[r16],16	// load cr.ipsr
	ld8 r30=[r17],16	// load cr.iip
	;;
	ld8 r29=[r16],16	// load cr.ifs
	ld8 r28=[r17],16	// load ar.unat
	;;
	ld8 r27=[r16],16	// load ar.pfs
	ld8 r26=[r17],16	// load ar.rsc
	;;
	ld8 r25=[r16],16	// load ar.rnat
	ld8 r24=[r17],16	// load ar.bspstore
	;;
	ld8 r23=[r16],16	// load predicates
	ld8 r22=[r17],16	// load b0
	;;
	ld8 r20=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	//load r1
	;;
	ld8.fill r12=[r16],16	//load r12
	ld8.fill r13=[r17],PT(R2)-PT(R13)	//load r13
	;;
	ld8 r19=[r16],PT(R3)-PT(AR_FPSR)	//load ar_fpsr
	ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)	//load r2
	;;
	ld8.fill r3=[r16]	//load r3
	ld8 r18=[r17]	//load ar_ccv
	;;
	mov ar.fpsr=r19
	mov ar.ccv=r18
	shr.u r18=r20,16
	;;
kvm_rbs_switch:
	mov r19=96
kvm_dont_preserve_current_frame:
/*
@@ -1208,76 +1198,76 @@ kvm_dont_preserve_current_frame:
# define pReturn p7
# define Nregs 14
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9	// RNaTslots <= floor(dirtySize / (64*8))
	sub r19=r19,r18	// r19 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r20	// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r19
	mov in1=0
	;;
	TEXT_ALIGN(32)
kvm_rse_clear_invalid:
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0
	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1	// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1
	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0
# undef pRecurse
# undef pReturn
	// loadrs has already been shifted
	alloc r16=ar.pfs,0,0,0,0	// drop current register frame
	;;
	loadrs
	;;
	mov ar.bspstore=r24
	;;
	mov ar.unat=r28
	mov ar.rnat=r25
	mov ar.rsc=r26
	;;
	mov cr.ipsr=r31
	mov cr.iip=r30
	mov cr.ifs=r29
	mov ar.pfs=r27
	adds r18=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r18=[r18]	//vpd
	adds r17=VMM_VCPU_ISR_OFFSET,r21
	;;
	ld8 r17=[r17]
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]	//vpsr
	mov r25=r18
	adds r16= VMM_VCPU_GP_OFFSET,r21
	;;
	ld8 r16= [r16]	// Put gp in r24
	movl r24=@gprel(ia64_vmm_entry)	// calculate return address
	;;
	add r24=r24,r16
	;;
	br.sptk.many kvm_vps_sync_write	// call the service
	;;
END(ia64_leave_hypervisor)
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
@@ -1290,16 +1280,14 @@ GLOBAL_ENTRY(ia64_vmm_entry)
 * r22:b0
 * r23:predicate
 */
	mov r24=r22
	mov r25=r18
	tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT	// p1=vpsr.ic
	(p1) br.cond.sptk.few kvm_vps_resume_normal
	(p2) br.cond.sptk.many kvm_vps_resume_handler
	;;
END(ia64_vmm_entry)
/*
 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
 *	u64 arg3, u64 arg4, u64 arg5,
@@ -1317,88 +1305,88 @@ psrsave = loc2
entry = loc3
hostret = r24
	alloc pfssave=ar.pfs,4,4,0,0
	mov rpsave=rp
	adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
	;;
	ld8 entry=[entry]
1:	mov hostret=ip
	mov r25=in1	// copy arguments
	mov r26=in2
	mov r27=in3
	mov psrsave=psr
	;;
	tbit.nz p6,p0=psrsave,14	// IA64_PSR_I
	tbit.nz p7,p0=psrsave,13	// IA64_PSR_IC
	;;
	add hostret=2f-1b,hostret	// calculate return address
	add entry=entry,in0
	;;
	rsm psr.i | psr.ic
	;;
	srlz.i
	mov b6=entry
	br.cond.sptk b6	// call the service
2:
	// Architectural sequence for enabling interrupts if necessary
	(p7) ssm psr.ic
	;;
	(p7) srlz.i
	;;
	//(p6) ssm psr.i
	;;
	mov rp=rpsave
	mov ar.pfs=pfssave
	mov r8=r31
	;;
	srlz.d
	br.ret.sptk rp
END(ia64_call_vsa)
#define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100)
GLOBAL_ENTRY(vmm_reset_entry)
	//set up ipsr, iip, vpd.vpsr, dcr
	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
	// For DCR: all bits 0
	bsw.0
	;;
	mov r21 =r13
	adds r14=-VMM_PT_REGS_SIZE, r12
	;;
	movl r6=0x501008826000	// IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
	movl r10=0x8000000000000000
	adds r16=PT(CR_IIP), r14
	adds r20=PT(R1), r14
	;;
	rsm psr.ic | psr.i
	;;
	srlz.i
	;;
	mov ar.rsc = 0
	;;
	flushrs
	;;
	mov ar.bspstore = 0
	// clear BSPSTORE
	;;
	mov cr.ipsr=r6
	mov cr.ifs=r10
	ld8 r4 = [r16]	// Set init iip for first run.
	ld8 r1 = [r20]
	;;
	mov cr.iip=r4
	adds r16=VMM_VPD_BASE_OFFSET,r13
	;;
	ld8 r18=[r16]
	;;
	adds r19=VMM_VPD_VPSR_OFFSET,r18
	;;
	ld8 r19=[r19]
	mov r17=r0
	mov r22=r0
	mov r23=r0
	br.cond.sptk ia64_vmm_entry
	br.ret.sptk b0
END(vmm_reset_entry)