Commit 64604e09 authored by Avi Kivity

Merge branch 'for-upstream' of git://github.com/agraf/linux-2.6 into next

ppc queue from Alex Graf:

 * Prepare some of the booke code for 64 bit support
 * BookE: Fix ESR flag in DSI
 * BookE: Add rfci emulation

* 'for-upstream' of git://github.com/agraf/linux-2.6:
  KVM: PPC: Critical interrupt emulation support
  KVM: PPC: e500mc: Fix tlbilx emulation for 64-bit guests
  KVM: PPC64: booke: Set interrupt computation mode for 64-bit host
  KVM: PPC: bookehv: Add ESR flag to Data Storage Interrupt
  KVM: PPC: bookehv64: Add support for std/ld emulation.
  booke: Added crit/mc exception handler for e500v2
  booke/bookehv: Add host crit-watchdog exception support
Signed-off-by: Avi Kivity <avi@redhat.com>
@@ -34,6 +34,8 @@ extern void __replay_interrupt(unsigned int vector);
 extern void timer_interrupt(struct pt_regs *);
 extern void performance_monitor_exception(struct pt_regs *regs);
+extern void WatchdogException(struct pt_regs *regs);
+extern void unknown_exception(struct pt_regs *regs);
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
......
@@ -612,6 +612,12 @@ static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 	regs->link = lr;
 }
 
+/*
+ * For interrupts that need to be handled by host interrupt handlers,
+ * the corresponding host handler is called from here, in a similar
+ * (though not identical) way to how it is called from the low-level
+ * handlers (such as arch/powerpc/kernel/head_fsl_booke.S).
+ */
 static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 				     unsigned int exit_nr)
 {
@@ -639,6 +645,17 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 		kvmppc_fill_pt_regs(&regs);
 		performance_monitor_exception(&regs);
 		break;
+	case BOOKE_INTERRUPT_WATCHDOG:
+		kvmppc_fill_pt_regs(&regs);
+#ifdef CONFIG_BOOKE_WDT
+		WatchdogException(&regs);
+#else
+		unknown_exception(&regs);
+#endif
+		break;
+	case BOOKE_INTERRUPT_CRITICAL:
+		unknown_exception(&regs);
+		break;
 	}
 }
@@ -683,6 +700,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
+	case BOOKE_INTERRUPT_WATCHDOG:
+		r = RESUME_GUEST;
+		break;
+
 	case BOOKE_INTERRUPT_DOORBELL:
 		kvmppc_account_exit(vcpu, DBELL_EXITS);
 		r = RESUME_GUEST;
......
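The two hunks above implement a two-step pattern: kvmppc_restart_interrupt() replays a host-owned interrupt into the matching host handler with a synthesized pt_regs snapshot, and kvmppc_handle_exit() then simply resumes the guest, since nothing has to be injected into it. A minimal standalone sketch of that dispatch shape, with toy names only (this is not the KVM API):

#include <stdio.h>

/* Toy sketch of the pattern above: host-owned exit reasons are replayed
 * into a host handler and the guest is then resumed; other exit reasons
 * have to bail out to the host/userspace.  All names are hypothetical. */
enum toy_exit_reason { TOY_EXIT_WATCHDOG, TOY_EXIT_CRITICAL, TOY_EXIT_MMIO };
enum toy_action { TOY_RESUME_GUEST, TOY_RESUME_HOST };

static void toy_host_watchdog(void) { printf("host watchdog handler\n"); }
static void toy_host_unknown(void)  { printf("host unknown-exception handler\n"); }

static enum toy_action toy_handle_exit(enum toy_exit_reason why)
{
	switch (why) {
	case TOY_EXIT_WATCHDOG:
		toy_host_watchdog();     /* step 1: let the host service it */
		return TOY_RESUME_GUEST; /* step 2: nothing to inject, re-enter guest */
	case TOY_EXIT_CRITICAL:
		toy_host_unknown();
		return TOY_RESUME_GUEST;
	default:
		return TOY_RESUME_HOST;  /* e.g. MMIO needs userspace emulation */
	}
}

int main(void)
{
	return toy_handle_exit(TOY_EXIT_WATCHDOG) == TOY_RESUME_GUEST ? 0 : 1;
}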
@@ -24,6 +24,7 @@
 #include "booke.h"
 
 #define OP_19_XOP_RFI     50
+#define OP_19_XOP_RFCI    51
 
 #define OP_31_XOP_MFMSR   83
 #define OP_31_XOP_WRTEE   131
@@ -36,6 +37,12 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
 }
 
+static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pc = vcpu->arch.csrr0;
+	kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
+}
+
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			    unsigned int inst, int *advance)
 {
@@ -52,6 +59,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		*advance = 0;
 		break;
 
+	case OP_19_XOP_RFCI:
+		kvmppc_emul_rfci(vcpu);
+		kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS);
+		*advance = 0;
+		break;
+
 	default:
 		emulated = EMULATE_FAIL;
 		break;
@@ -113,6 +126,12 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	case SPRN_ESR:
 		vcpu->arch.shared->esr = spr_val;
 		break;
+	case SPRN_CSRR0:
+		vcpu->arch.csrr0 = spr_val;
+		break;
+	case SPRN_CSRR1:
+		vcpu->arch.csrr1 = spr_val;
+		break;
 	case SPRN_DBCR0:
 		vcpu->arch.dbcr0 = spr_val;
 		break;
@@ -232,6 +251,12 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 	case SPRN_ESR:
 		*spr_val = vcpu->arch.shared->esr;
 		break;
+	case SPRN_CSRR0:
+		*spr_val = vcpu->arch.csrr0;
+		break;
+	case SPRN_CSRR1:
+		*spr_val = vcpu->arch.csrr1;
+		break;
 	case SPRN_DBCR0:
 		*spr_val = vcpu->arch.dbcr0;
 		break;
......
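For readers unfamiliar with BookE critical interrupts: the rfci emulation added above mirrors rfi exactly, just with the critical save/restore pair. Taking a critical interrupt saves PC/MSR into CSRR0/CSRR1, and rfci restores them, which is why the CSRR0/CSRR1 mtspr/mfspr cases are added alongside it. A self-contained toy model of those semantics (not the KVM code; the struct and values are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Toy model of BookE critical-interrupt state, illustrating what the
 * emulation above does with vcpu->arch.csrr0/csrr1.  Not kernel code. */
struct toy_cpu {
	uint64_t pc, msr;
	uint64_t csrr0, csrr1;   /* critical save/restore registers */
};

static void toy_take_critical(struct toy_cpu *cpu, uint64_t handler)
{
	cpu->csrr0 = cpu->pc;    /* where rfci will return to */
	cpu->csrr1 = cpu->msr;   /* MSR that rfci will restore */
	cpu->pc = handler;
}

static void toy_rfci(struct toy_cpu *cpu)
{
	cpu->pc  = cpu->csrr0;
	cpu->msr = cpu->csrr1;
}

int main(void)
{
	struct toy_cpu cpu = { .pc = 0x1000, .msr = 0x8000 };

	toy_take_critical(&cpu, 0x100);
	toy_rfci(&cpu);
	printf("pc=0x%llx msr=0x%llx\n",
	       (unsigned long long)cpu.pc, (unsigned long long)cpu.msr);
	return 0;
}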
@@ -53,16 +53,21 @@
 		       (1<<BOOKE_INTERRUPT_PROGRAM) | \
 		       (1<<BOOKE_INTERRUPT_DTLB_MISS))
 
-.macro KVM_HANDLER ivor_nr
+.macro KVM_HANDLER ivor_nr scratch srr0
 _GLOBAL(kvmppc_handler_\ivor_nr)
 	/* Get pointer to vcpu and record exit number. */
-	mtspr	SPRN_SPRG_WSCRATCH0, r4
+	mtspr	\scratch , r4
 	mfspr	r4, SPRN_SPRG_RVCPU
+	stw	r3, VCPU_GPR(r3)(r4)
 	stw	r5, VCPU_GPR(r5)(r4)
 	stw	r6, VCPU_GPR(r6)(r4)
+	mfspr	r3, \scratch
 	mfctr	r5
-	lis	r6, kvmppc_resume_host@h
+	stw	r3, VCPU_GPR(r4)(r4)
 	stw	r5, VCPU_CTR(r4)
+	mfspr	r3, \srr0
+	lis	r6, kvmppc_resume_host@h
+	stw	r3, VCPU_PC(r4)
 	li	r5, \ivor_nr
 	ori	r6, r6, kvmppc_resume_host@l
 	mtctr	r6
@@ -70,37 +75,35 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
 .endm
 
 _GLOBAL(kvmppc_handlers_start)
-KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
-KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
-KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
-KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
-KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
-KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
-KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
-KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
-KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
-KVM_HANDLER BOOKE_INTERRUPT_FIT
-KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
-KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
-KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
-KVM_HANDLER BOOKE_INTERRUPT_DEBUG
-KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA
-KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND
+KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
+KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 
 _GLOBAL(kvmppc_handler_len)
 	.long kvmppc_handler_1 - kvmppc_handler_0
 
 /* Registers:
  *  SPRG_SCRATCH0: guest r4
  *  r4: vcpu pointer
  *  r5: KVM exit number
  */
 _GLOBAL(kvmppc_resume_host)
-	stw	r3, VCPU_GPR(r3)(r4)
 	mfcr	r3
 	stw	r3, VCPU_CR(r4)
 	stw	r7, VCPU_GPR(r7)(r4)
@@ -181,10 +184,6 @@ _GLOBAL(kvmppc_resume_host)
 	stw	r3, VCPU_LR(r4)
 	mfxer	r3
 	stw	r3, VCPU_XER(r4)
-	mfspr	r3, SPRN_SPRG_RSCRATCH0
-	stw	r3, VCPU_GPR(r4)(r4)
-	mfspr	r3, SPRN_SRR0
-	stw	r3, VCPU_PC(r4)
 
 	/* Restore host stack pointer and PID before IVPR, since the host
 	 * exception handlers use them. */
......
@@ -267,7 +267,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
 kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
 	SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
 kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
-	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
+	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
 kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
 kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
 kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
......
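The NEED_ESR flag added to the data storage handler above matters because the guest's own DSI handler classifies the fault from ESR, for example whether the faulting access was a store. A small illustration follows; the ESR_ST value matches the BookE definition in arch/powerpc/include/asm/reg_booke.h, while the helper itself is a hypothetical stand-in, not kernel code:

#include <stdio.h>

/* Illustration only: how a guest DSI handler might use ESR.  ESR_ST is
 * the BookE "store operation" bit; dsi_kind() is a hypothetical helper. */
#define ESR_ST 0x00800000

static const char *dsi_kind(unsigned long esr)
{
	return (esr & ESR_ST) ? "store fault" : "load fault";
}

int main(void)
{
	printf("%s\n", dsi_kind(ESR_ST));
	printf("%s\n", dsi_kind(0));
	return 0;
}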
 /*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Varun Sethi, <varun.sethi@freescale.com>
  *
@@ -57,7 +57,8 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
 			   struct kvm_book3e_206_tlb_entry *gtlbe)
 {
 	unsigned int tid, ts;
-	u32 val, eaddr, lpid;
+	gva_t eaddr;
+	u32 val, lpid;
 	unsigned long flags;
 
 	ts = get_tlb_ts(gtlbe);
@@ -183,6 +184,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
 				 SPRN_EPCR_DUVD;
+#ifdef CONFIG_64BIT
+	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
+#endif
 	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
 	vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
 	vcpu->arch.epsc = vcpu->arch.eplc;
......
@@ -59,11 +59,13 @@
 #define OP_31_XOP_STHBRX    918
 
 #define OP_LWZ  32
+#define OP_LD   58
 #define OP_LWZU 33
 #define OP_LBZ  34
 #define OP_LBZU 35
 #define OP_STW  36
 #define OP_STWU 37
+#define OP_STD  62
 #define OP_STB  38
 #define OP_STBU 39
 #define OP_LHZ  40
@@ -392,6 +394,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		break;
 
+	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+	case OP_LD:
+		rt = get_rt(inst);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+		break;
+
 	case OP_LWZU:
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
@@ -412,6 +420,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 					       4, 1);
 		break;
 
+	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
+	case OP_STD:
+		rs = get_rs(inst);
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
+					       8, 1);
+		break;
+
 	case OP_STWU:
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
......
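The new OP_LD/OP_STD cases above only cover the plain ld and std forms; as the TBD comments note, primary opcode 58 also encodes ldu/lwa and opcode 62 also encodes stdu, selected by the low two bits of the DS-form instruction word. A standalone decoder sketch (not the KVM decoder) showing where those fields live:

#include <stdint.h>
#include <stdio.h>

/* Illustrative DS-form decoder, not the KVM code.  The primary opcode is
 * the top 6 bits (58 = ld family, 62 = std family); RT/RS and RA follow;
 * the displacement is the low 16 bits with its bottom two bits reused as
 * the extended opcode that selects ld/ldu/lwa or std/stdu. */
static void decode_ds_form(uint32_t inst)
{
	uint32_t opcode = inst >> 26;
	uint32_t rt_rs  = (inst >> 21) & 0x1f;
	uint32_t ra     = (inst >> 16) & 0x1f;
	int32_t  disp   = (int16_t)(inst & 0xfffc); /* sign-extended byte offset */
	uint32_t xo     = inst & 0x3;               /* 0 selects plain ld/std */

	printf("opcode=%u rt/rs=%u ra=%u disp=%d xo=%u\n",
	       opcode, rt_rs, ra, (int)disp, xo);
}

int main(void)
{
	decode_ds_form(0xe8830010);  /* ld  r4, 16(r3) */
	decode_ds_form(0xf8830010);  /* std r4, 16(r3) */
	return 0;
}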