Commit 00c027db authored by Paolo Bonzini

Merge tag 'signed-kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm-next

Patch queue for ppc - 2014-09-24

New awesome things in this release:

  - E500: e6500 core support
  - E500: guest and remote debug support
  - Book3S: remote sw breakpoint support
  - Book3S: HV: Minor bugfixes

Alexander Graf (1):
      KVM: PPC: Pass enum to kvmppc_get_last_inst

Bharat Bhushan (8):
      KVM: PPC: BOOKE: allow debug interrupt at "debug level"
      KVM: PPC: BOOKE : Emulate rfdi instruction
      KVM: PPC: BOOKE: Allow guest to change MSR_DE
      KVM: PPC: BOOKE: Clear guest dbsr in userspace exit KVM_EXIT_DEBUG
      KVM: PPC: BOOKE: Guest and hardware visible debug registers are same
      KVM: PPC: BOOKE: Add one reg interface for DBSR
      KVM: PPC: BOOKE: Add one_reg documentation of SPRG9 and DBSR
      KVM: PPC: BOOKE: Emulate debug registers and exception

Madhavan Srinivasan (2):
      powerpc/kvm: support to handle sw breakpoint
      powerpc/kvm: common sw breakpoint instr across ppc

Michael Neuling (1):
      KVM: PPC: Book3S HV: Add register name when loading toc

Mihai Caraman (10):
      powerpc/booke: Restrict SPE exception handlers to e200/e500 cores
      powerpc/booke: Revert SPE/AltiVec common defines for interrupt numbers
      KVM: PPC: Book3E: Increase FPU laziness
      KVM: PPC: Book3e: Add AltiVec support
      KVM: PPC: Make ONE_REG powerpc generic
      KVM: PPC: Move ONE_REG AltiVec support to powerpc
      KVM: PPC: Remove the tasklet used by the hrtimer
      KVM: PPC: Remove shared defines for SPE and AltiVec interrupts
      KVM: PPC: e500mc: Add support for single threaded vcpus on e6500 core
      KVM: PPC: Book3E: Enable e6500 core

Paul Mackerras (2):
      KVM: PPC: Book3S HV: Increase timeout for grabbing secondary threads
      KVM: PPC: Book3S HV: Only accept host PVR value for guest PVR
......@@ -1901,6 +1901,8 @@ registers, find a list below:
PPC | KVM_REG_PPC_ARCH_COMPAT | 32
PPC | KVM_REG_PPC_DABRX | 32
PPC | KVM_REG_PPC_WORT | 64
PPC | KVM_REG_PPC_SPRG9 | 64
PPC | KVM_REG_PPC_DBSR | 32
PPC | KVM_REG_PPC_TM_GPR0 | 64
...
PPC | KVM_REG_PPC_TM_GPR31 | 64
......
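The two new rows expose SPRG9 and DBSR through the generic ONE_REG interface. A minimal userspace sketch of reading the 32-bit DBSR (assuming vcpu_fd is a vcpu descriptor obtained earlier via KVM_CREATE_VCPU) might look like this:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the vcpu's 32-bit DBSR through the ONE_REG interface.
 * vcpu_fd is assumed to come from a prior KVM_CREATE_VCPU. */
static int read_dbsr(int vcpu_fd, uint32_t *dbsr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_DBSR,   /* id encodes size U32 */
		.addr = (uintptr_t)dbsr,    /* kernel copies 4 bytes here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}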
......@@ -53,17 +53,17 @@
#define BOOKE_INTERRUPT_DEBUG 15
/* E500 */
#define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32
#define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33
/*
* TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines
*/
#define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
#define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL
#define BOOKE_INTERRUPT_ALTIVEC_ASSIST \
BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST
#ifdef CONFIG_SPE_POSSIBLE
#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
#define BOOKE_INTERRUPT_SPE_FP_DATA 33
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#endif
#ifdef CONFIG_PPC_E500MC
#define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 32
#define BOOKE_INTERRUPT_ALTIVEC_ASSIST 33
#endif
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
#define BOOKE_INTERRUPT_DOORBELL 36
#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
......
......@@ -23,15 +23,16 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
/* LPIDs we support with this build -- runtime limit may be lower */
/*
* Number of available lpids. Only the low-order 6 bits of the LPID register are
* implemented on e500mc+ cores.
*/
#define KVMPPC_NR_LPIDS 64
#define KVMPPC_INST_EHPRIV 0x7c00021c
#define EHPRIV_OC_SHIFT 11
/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
#define EHPRIV_OC_DEBUG 1
#define KVMPPC_INST_EHPRIV_DEBUG (KVMPPC_INST_EHPRIV | \
(EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
......
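Expanding these macros: OC occupies the field at bit 11, so "ehpriv 1" encodes to 0x7c00021c | (1 << 11) = 0x7c000a1c. A small compile-time check of that arithmetic (illustrative only):

#define KVMPPC_INST_EHPRIV       0x7c00021c
#define EHPRIV_OC_SHIFT          11
#define EHPRIV_OC_DEBUG          1
#define KVMPPC_INST_EHPRIV_DEBUG (KVMPPC_INST_EHPRIV | \
                                  (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))

/* 0x21c | 0x800 == 0xa1c, so the debug variant is 0x7c000a1c */
_Static_assert(KVMPPC_INST_EHPRIV_DEBUG == 0x7c000a1c,
               "ehpriv OC=1 encoding");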
......@@ -144,6 +144,7 @@ enum kvm_exit_types {
EMULATED_TLBWE_EXITS,
EMULATED_RFI_EXITS,
EMULATED_RFCI_EXITS,
EMULATED_RFDI_EXITS,
DEC_EXITS,
EXT_INTR_EXITS,
HALT_WAKEUP,
......@@ -589,8 +590,6 @@ struct kvm_vcpu_arch {
u32 crit_save;
/* guest debug registers*/
struct debug_reg dbg_reg;
/* hardware visible debug registers when in guest state */
struct debug_reg shadow_dbg_reg;
#endif
gpa_t paddr_accessed;
gva_t vaddr_accessed;
......@@ -612,7 +611,6 @@ struct kvm_vcpu_arch {
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
struct hrtimer dec_timer;
struct tasklet_struct tasklet;
u64 dec_jiffies;
u64 dec_expires;
unsigned long pending_exceptions;
......
......@@ -38,6 +38,12 @@
#include <asm/paca.h>
#endif
/*
* KVMPPC_INST_SW_BREAKPOINT is the debug instruction
* used to support software breakpoints.
*/
#define KVMPPC_INST_SW_BREAKPOINT 0x00dddd00
enum emulation_result {
EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
......@@ -89,7 +95,7 @@ extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(unsigned long data);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
......@@ -206,6 +212,9 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
union kvmppc_one_reg {
u32 wval;
u64 dval;
......
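The opcode behind KVM_REG_PPC_DEBUG_INST is what a debugger patches over the target instruction. A hypothetical debugger-side sketch, assuming the guest page containing the instruction is already mapped writable at insn:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Query the breakpoint opcode and plant it, saving the original
 * instruction word so it can be restored when the breakpoint hits. */
static int plant_sw_breakpoint(int vcpu_fd, uint32_t *insn, uint32_t *saved)
{
	uint32_t bp;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_DEBUG_INST,
		.addr = (uintptr_t)&bp,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	*saved = *insn;   /* keep the original word for later restore */
	*insn  = bp;      /* 0x00dddd00 after this series */
	return 0;
}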
......@@ -319,6 +319,8 @@
* DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
*/
#ifdef CONFIG_BOOKE
#define DBSR_IDE 0x80000000 /* Imprecise Debug Event */
#define DBSR_MRR 0x30000000 /* Most Recent Reset */
#define DBSR_IC 0x08000000 /* Instruction Completion */
#define DBSR_BT 0x04000000 /* Branch Taken */
#define DBSR_IRPT 0x02000000 /* Exception Debug Event */
......
......@@ -476,6 +476,11 @@ struct kvm_get_htab_header {
/* FP and vector status/control registers */
#define KVM_REG_PPC_FPSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
/*
* The VSCR register is documented as a 32-bit register in the ISA, but it can
* only be accessed via a vector register. Expose VSCR as a 32-bit register
* even though the kernel represents it as a 128-bit vector.
*/
#define KVM_REG_PPC_VSCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
/* Virtual processor areas */
......@@ -557,6 +562,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
#define KVM_REG_PPC_SPRG9 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
#define KVM_REG_PPC_DBSR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
......
......@@ -91,6 +91,7 @@ _GLOBAL(setup_altivec_idle)
blr
#ifdef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e6500)
mflr r6
#ifdef CONFIG_PPC64
......@@ -107,14 +108,20 @@ _GLOBAL(__setup_cpu_e6500)
bl __setup_cpu_e5500
mtlr r6
blr
#endif /* CONFIG_PPC_E500MC */
#ifdef CONFIG_PPC32
#ifdef CONFIG_E200
_GLOBAL(__setup_cpu_e200)
/* enable dedicated debug exception handling resources (Debug APU) */
mfspr r3,SPRN_HID0
ori r3,r3,HID0_DAPUEN@l
mtspr SPRN_HID0,r3
b __setup_e200_ivors
#endif /* CONFIG_E200 */
#ifdef CONFIG_E500
#ifndef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
mflr r4
......@@ -129,6 +136,7 @@ _GLOBAL(__setup_cpu_e500v2)
#endif
mtlr r4
blr
#else /* CONFIG_PPC_E500MC */
_GLOBAL(__setup_cpu_e500mc)
_GLOBAL(__setup_cpu_e5500)
mflr r5
......@@ -159,7 +167,9 @@ _GLOBAL(__setup_cpu_e5500)
2:
mtlr r5
blr
#endif
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_E500 */
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__restore_cpu_e6500)
......
......@@ -1961,6 +1961,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_E500
#ifdef CONFIG_PPC32
#ifndef CONFIG_PPC_E500MC
{ /* e500 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x80200000,
......@@ -2000,6 +2001,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_e500,
.platform = "ppc8548",
},
#else
{ /* e500mc */
.pvr_mask = 0xffff0000,
.pvr_value = 0x80230000,
......@@ -2018,7 +2020,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
},
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC_E500MC
{ /* e5500 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x80240000,
......@@ -2062,6 +2066,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_e500mc,
.platform = "ppce6500",
},
#endif /* CONFIG_PPC_E500MC */
#ifdef CONFIG_PPC32
{ /* default match */
.pvr_mask = 0x00000000,
......
......@@ -635,7 +635,7 @@ interrupt_end_book3e:
/* Altivec Unavailable Interrupt */
START_EXCEPTION(altivec_unavailable);
NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL,
NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
PROLOG_ADDITION_NONE)
/* we can probably do a shorter exception entry for that one... */
EXCEPTION_COMMON(0x200)
......@@ -658,7 +658,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* AltiVec Assist */
START_EXCEPTION(altivec_assist);
NORMAL_EXCEPTION_PROLOG(0x220,
BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST,
BOOKE_INTERRUPT_ALTIVEC_ASSIST,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x220)
INTS_DISABLE
......
......@@ -613,34 +613,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfspr r10, SPRN_SPRG_RSCRATCH0
b InstructionStorage
/* Define SPE handlers for e200 and e500v2 */
#ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
NORMAL_EXCEPTION_PROLOG(SPE_ALTIVEC_UNAVAIL)
NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
beq 1f
bl load_up_spe
b fast_exception_return
1: addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \
#elif defined(CONFIG_SPE_POSSIBLE)
EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
#endif /* CONFIG_SPE_POSSIBLE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
EXCEPTION(0x2030, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData,
SPEFloatingPointException, EXC_XFER_EE)
/* SPE Floating Point Round */
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
SPEFloatingPointRoundException, EXC_XFER_EE)
#else
EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData,
#elif defined(CONFIG_SPE_POSSIBLE)
EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData,
unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
#endif /* CONFIG_SPE_POSSIBLE */
/* Performance Monitor */
EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
......@@ -947,6 +949,7 @@ get_phys_addr:
* Global functions
*/
#ifdef CONFIG_E200
/* Adjust or set up IVORs for e200 */
_GLOBAL(__setup_e200_ivors)
li r3,DebugDebug@l
......@@ -959,7 +962,10 @@ _GLOBAL(__setup_e200_ivors)
mtspr SPRN_IVOR34,r3
sync
blr
#endif
#ifdef CONFIG_E500
#ifndef CONFIG_PPC_E500MC
/* Adjust or set up IVORs for e500v1/v2 */
_GLOBAL(__setup_e500_ivors)
li r3,DebugCrit@l
......@@ -974,7 +980,7 @@ _GLOBAL(__setup_e500_ivors)
mtspr SPRN_IVOR35,r3
sync
blr
#else
/* Adjust or set up IVORs for e500mc */
_GLOBAL(__setup_e500mc_ivors)
li r3,DebugDebug@l
......@@ -1000,6 +1006,8 @@ _GLOBAL(__setup_ehv_ivors)
mtspr SPRN_IVOR41,r3
sync
blr
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_E500 */
#ifdef CONFIG_SPE
/*
......
......@@ -535,174 +535,111 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -ENOTSUPP;
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r;
union kvmppc_one_reg val;
int size;
int r = 0;
long int i;
size = one_reg_size(reg->id);
if (size > sizeof(val))
return -EINVAL;
r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
if (r == -EINVAL) {
r = 0;
switch (reg->id) {
switch (id) {
case KVM_REG_PPC_DAR:
val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
*val = get_reg_val(id, kvmppc_get_dar(vcpu));
break;
case KVM_REG_PPC_DSISR:
val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = reg->id - KVM_REG_PPC_FPR0;
val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
i = id - KVM_REG_PPC_FPR0;
*val = get_reg_val(id, VCPU_FPR(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
break;
#ifdef CONFIG_ALTIVEC
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
*val = get_reg_val(id, vcpu->arch.fp.fpscr);
break;
case KVM_REG_PPC_VRSAVE:
val = get_reg_val(reg->id, vcpu->arch.vrsave);
break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
long int i = reg->id - KVM_REG_PPC_VSR0;
val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
i = id - KVM_REG_PPC_VSR0;
val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
} else {
r = -ENXIO;
}
break;
#endif /* CONFIG_VSX */
case KVM_REG_PPC_DEBUG_INST: {
u32 opcode = INS_TW;
r = copy_to_user((u32 __user *)(long)reg->addr,
&opcode, sizeof(u32));
case KVM_REG_PPC_DEBUG_INST:
*val = get_reg_val(id, INS_TW);
break;
}
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
if (!vcpu->arch.icp) {
r = -ENXIO;
break;
}
val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
val = get_reg_val(reg->id, vcpu->arch.fscr);
*val = get_reg_val(id, vcpu->arch.fscr);
break;
case KVM_REG_PPC_TAR:
val = get_reg_val(reg->id, vcpu->arch.tar);
*val = get_reg_val(id, vcpu->arch.tar);
break;
case KVM_REG_PPC_EBBHR:
val = get_reg_val(reg->id, vcpu->arch.ebbhr);
*val = get_reg_val(id, vcpu->arch.ebbhr);
break;
case KVM_REG_PPC_EBBRR:
val = get_reg_val(reg->id, vcpu->arch.ebbrr);
*val = get_reg_val(id, vcpu->arch.ebbrr);
break;
case KVM_REG_PPC_BESCR:
val = get_reg_val(reg->id, vcpu->arch.bescr);
*val = get_reg_val(id, vcpu->arch.bescr);
break;
case KVM_REG_PPC_VTB:
val = get_reg_val(reg->id, vcpu->arch.vtb);
*val = get_reg_val(id, vcpu->arch.vtb);
break;
case KVM_REG_PPC_IC:
val = get_reg_val(reg->id, vcpu->arch.ic);
*val = get_reg_val(id, vcpu->arch.ic);
break;
default:
r = -EINVAL;
break;
}
}
if (r)
return r;
if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
r = -EFAULT;
return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r;
union kvmppc_one_reg val;
int size;
int r = 0;
long int i;
size = one_reg_size(reg->id);
if (size > sizeof(val))
return -EINVAL;
if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
return -EFAULT;
r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
if (r == -EINVAL) {
r = 0;
switch (reg->id) {
switch (id) {
case KVM_REG_PPC_DAR:
kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
kvmppc_set_dar(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DSISR:
kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = reg->id - KVM_REG_PPC_FPR0;
VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
i = id - KVM_REG_PPC_FPR0;
VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
break;
case KVM_REG_PPC_FPSCR:
vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
break;
#ifdef CONFIG_ALTIVEC
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_VRSAVE:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
vcpu->arch.vrsave = set_reg_val(reg->id, val);
vcpu->arch.fp.fpscr = set_reg_val(id, *val);
break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
long int i = reg->id - KVM_REG_PPC_VSR0;
vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
i = id - KVM_REG_PPC_VSR0;
vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
} else {
r = -ENXIO;
}
......@@ -715,29 +652,29 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
break;
}
r = kvmppc_xics_set_icp(vcpu,
set_reg_val(reg->id, val));
set_reg_val(id, *val));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
vcpu->arch.fscr = set_reg_val(reg->id, val);
vcpu->arch.fscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TAR:
vcpu->arch.tar = set_reg_val(reg->id, val);
vcpu->arch.tar = set_reg_val(id, *val);
break;
case KVM_REG_PPC_EBBHR:
vcpu->arch.ebbhr = set_reg_val(reg->id, val);
vcpu->arch.ebbhr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_EBBRR:
vcpu->arch.ebbrr = set_reg_val(reg->id, val);
vcpu->arch.ebbrr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_BESCR:
vcpu->arch.bescr = set_reg_val(reg->id, val);
vcpu->arch.bescr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_VTB:
vcpu->arch.vtb = set_reg_val(reg->id, val);
vcpu->arch.vtb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_IC:
vcpu->arch.ic = set_reg_val(reg->id, val);
vcpu->arch.ic = set_reg_val(id, *val);
break;
default:
r = -EINVAL;
......@@ -778,13 +715,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
return -EINVAL;
vcpu->guest_debug = dbg->control;
return 0;
}
void kvmppc_decrementer_func(unsigned long data)
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
kvmppc_core_queue_dec(vcpu);
kvm_vcpu_kick(vcpu);
}
......
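Throughout this refactor, the get_reg_val()/set_reg_val() helpers move a scalar in and out of union kvmppc_one_reg according to the size encoded in the register id, and the copy to and from userspace now happens once, in the generic wrappers shown further below. The size decoding itself is just a shift of the id's size field (a sketch mirroring the uapi encoding; the real helpers live in asm/kvm_ppc.h):

#include <stdint.h>

/* ONE_REG ids carry log2(size in bytes) in bits 52..55; these
 * values mirror the KVM_REG_SIZE_* encoding in <linux/kvm.h>. */
#define KVM_REG_SIZE_MASK   0x00f0000000000000ULL
#define KVM_REG_SIZE_SHIFT  52

static inline unsigned long one_reg_size(uint64_t id)
{
	return 1ul << ((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT);
}
/* e.g. a KVM_REG_SIZE_U32 id yields 4, a KVM_REG_SIZE_U64 id yields 8 */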
......@@ -725,6 +725,30 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
return kvmppc_hcall_impl_hv_realmode(cmd);
}
static int kvmppc_emulate_debug_inst(struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
u32 last_inst;
if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
EMULATE_DONE) {
/*
* Fetch failed, so return to guest and
* try executing it again.
*/
return RESUME_GUEST;
}
if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.address = kvmppc_get_pc(vcpu);
return RESUME_HOST;
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
return RESUME_GUEST;
}
}
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
......@@ -807,12 +831,18 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
/*
* This occurs if the guest executes an illegal instruction.
* We just generate a program interrupt to the guest, since
* we don't emulate any guest instructions at this stage.
* If guest debug is disabled, generate a program interrupt
* to the guest. If guest debug is enabled, we need to check
* whether the instruction is a software breakpoint instruction.
* Return to the guest or the host accordingly.
*/
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
r = kvmppc_emulate_debug_inst(run, vcpu);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
}
break;
/*
* This occurs if the guest (kernel or userspace), does something that
......@@ -856,7 +886,9 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
{
int i, j;
kvmppc_set_pvr_hv(vcpu, sregs->pvr);
/* Only accept the same PVR as the host's, since we can't spoof it */
if (sregs->pvr != vcpu->arch.pvr)
return -EINVAL;
j = 0;
for (i = 0; i < vcpu->arch.slb_nr; i++) {
......@@ -922,6 +954,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
long int i;
switch (id) {
case KVM_REG_PPC_DEBUG_INST:
*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
break;
case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, 0);
break;
......@@ -1489,7 +1524,7 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
static int kvmppc_grab_hwthread(int cpu)
{
struct paca_struct *tpaca;
long timeout = 1000;
long timeout = 10000;
tpaca = &paca[cpu];
......
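The new branch is reachable only after userspace opts in with KVM_GUESTDBG_USE_SW_BP; otherwise the old behaviour (inject SRR1_PROGILL) is preserved. A minimal sketch of the opt-in ioctl (vcpu_fd as in the earlier sketches):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Tell KVM to report software breakpoints to userspace instead of
 * injecting an illegal-instruction program interrupt into the guest. */
static int enable_sw_breakpoints(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}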
......@@ -355,6 +355,7 @@ kvmppc_hv_entry:
* MSR = ~IR|DR
* R13 = PACA
* R1 = host R1
* R2 = TOC
* all other volatile GPRS = free
*/
mflr r0
......@@ -503,7 +504,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
toc_tlbie_lock:
.tc native_tlbie_lock[TC],native_tlbie_lock
.previous
ld r3,toc_tlbie_lock@toc(2)
ld r3,toc_tlbie_lock@toc(r2)
#ifdef __BIG_ENDIAN__
lwz r8,PACA_LOCK_TOKEN(r13)
#else
......
......@@ -1320,6 +1320,9 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
int r = 0;
switch (id) {
case KVM_REG_PPC_DEBUG_INST:
*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
break;
case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, to_book3s(vcpu)->hior);
break;
......
......@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
}
#endif
/*
* Load up the guest vcpu FP state if it's needed.
* It also sets MSR_FP in the thread so that the host knows
* we're holding the FPU, letting the host save the guest vcpu
* FP state if other threads need the FPU.
* This simulates an FP unavailable fault.
*
* Must be called with preemption disabled.
*/
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (!(current->thread.regs->msr & MSR_FP)) {
enable_kernel_fp();
load_fp_state(&vcpu->arch.fp);
current->thread.fp_save_area = &vcpu->arch.fp;
current->thread.regs->msr |= MSR_FP;
}
#endif
}
/*
* Save the guest vcpu FP state into the thread.
* Must be called with preemption disabled.
*/
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
current->thread.fp_save_area = NULL;
#endif
}
static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
......@@ -134,6 +168,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
#endif
}
/*
* Simulate an AltiVec unavailable fault to load guest state
* from the thread into the AltiVec unit.
* Must be called with preemption disabled.
*/
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
if (!(current->thread.regs->msr & MSR_VEC)) {
enable_kernel_altivec();
load_vr_state(&vcpu->arch.vr);
current->thread.vr_save_area = &vcpu->arch.vr;
current->thread.regs->msr |= MSR_VEC;
}
}
#endif
}
/*
* Save the guest vcpu AltiVec state into the thread.
* Must be called with preemption disabled.
*/
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
current->thread.vr_save_area = NULL;
}
#endif
}
static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
......@@ -267,6 +335,16 @@ static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}
void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
kvmppc_set_srr0(vcpu, srr0);
......@@ -341,9 +419,15 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
......@@ -377,7 +461,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
allowed = vcpu->arch.shared->msr & MSR_DE;
allowed = allowed && !crit;
msr_mask = MSR_ME;
int_class = INT_CLASS_CRIT;
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
int_class = INT_CLASS_DBG;
else
int_class = INT_CLASS_CRIT;
break;
}
......@@ -654,20 +742,27 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/*
* Since we can't trap on MSR_FP in GS-mode, we consider the guest
* as always using the FPU. Kernel usage of FP (via
* enable_kernel_fp()) in this thread must not occur while
* vcpu->fpu_active is set.
* as always using the FPU.
*/
vcpu->fpu_active = 1;
kvmppc_load_guest_fp(vcpu);
#endif
#ifdef CONFIG_ALTIVEC
/* Save userspace AltiVec state on the stack */
if (cpu_has_feature(CPU_FTR_ALTIVEC))
enable_kernel_altivec();
/*
* Since we can't trap on MSR_VEC in GS-mode, we consider the guest
* as always using AltiVec.
*/
kvmppc_load_guest_altivec(vcpu);
#endif
/* Switch to guest debug context */
debug = vcpu->arch.shadow_dbg_reg;
debug = vcpu->arch.dbg_reg;
switch_booke_debug_regs(&debug);
debug = current->thread.debug;
current->thread.debug = vcpu->arch.shadow_dbg_reg;
current->thread.debug = vcpu->arch.dbg_reg;
vcpu->arch.pgdir = current->mm->pgd;
kvmppc_fix_ee_before_entry();
......@@ -683,8 +778,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
#endif
vcpu->fpu_active = 0;
#ifdef CONFIG_ALTIVEC
kvmppc_save_guest_altivec(vcpu);
#endif
out:
......@@ -728,9 +825,36 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
u32 dbsr = vcpu->arch.dbsr;
if (vcpu->guest_debug == 0) {
/*
* Debug resources belong to the guest.
* An imprecise debug event is not injected.
*/
if (dbsr & DBSR_IDE) {
dbsr &= ~DBSR_IDE;
if (!dbsr)
return RESUME_GUEST;
}
if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
(vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
kvmppc_core_queue_debug(vcpu);
/* Inject a program interrupt if trap debug is not allowed */
if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
kvmppc_core_queue_program(vcpu, ESR_PTR);
return RESUME_GUEST;
}
/*
* Debug resources are owned by userspace.
* Clear the guest DBSR (vcpu->arch.dbsr).
*/
vcpu->arch.dbsr = 0;
run->debug.arch.status = 0;
run->debug.arch.address = vcpu->arch.pc;
......@@ -868,7 +992,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOKE_INTERRUPT_DATA_STORAGE:
case BOOKE_INTERRUPT_DTLB_MISS:
case BOOKE_INTERRUPT_HV_PRIV:
emulated = kvmppc_get_last_inst(vcpu, false, &last_inst);
emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
break;
case BOOKE_INTERRUPT_PROGRAM:
/* SW breakpoints arrive as illegal instructions on HV */
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
break;
default:
break;
......@@ -947,6 +1076,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_PROGRAM:
if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
(last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
/*
* We are here because of an SW breakpoint instruction,
* so let's return to the host to handle it.
*/
r = kvmppc_handle_debug(run, vcpu);
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
break;
}
if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
/*
* Program traps generated by user-level software must
......@@ -991,7 +1132,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST;
break;
#else
#elif defined(CONFIG_SPE_POSSIBLE)
case BOOKE_INTERRUPT_SPE_UNAVAIL:
/*
* Guest wants SPE, but host kernel doesn't support it. Send
......@@ -1012,6 +1153,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->hw.hardware_exit_reason = exit_nr;
r = RESUME_HOST;
break;
#endif /* CONFIG_SPE_POSSIBLE */
/*
* On cores with the Vector category, KVM is loaded only if CONFIG_ALTIVEC is set;
* see kvmppc_core_check_processor_compat().
*/
#ifdef CONFIG_ALTIVEC
case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
r = RESUME_GUEST;
break;
#endif
case BOOKE_INTERRUPT_DATA_STORAGE:
......@@ -1188,6 +1345,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
kvmppc_load_guest_fp(vcpu);
kvmppc_load_guest_altivec(vcpu);
}
}
......@@ -1243,6 +1402,11 @@ int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
(unsigned long)vcpu);
/*
* Clear DBSR.MRR to avoid a guest debug interrupt, as
* this bit is of interest only to the host.
*/
mtspr(SPRN_DBSR, DBSR_MRR);
return 0;
}
......@@ -1457,144 +1621,125 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r = 0;
union kvmppc_one_reg val;
int size;
size = one_reg_size(reg->id);
if (size > sizeof(val))
return -EINVAL;
switch (reg->id) {
switch (id) {
case KVM_REG_PPC_IAC1:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
break;
case KVM_REG_PPC_IAC2:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
break;
case KVM_REG_PPC_IAC4:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
break;
#endif
case KVM_REG_PPC_DAC1:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
break;
case KVM_REG_PPC_DAC2:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
break;
case KVM_REG_PPC_EPR: {
u32 epr = kvmppc_get_epr(vcpu);
val = get_reg_val(reg->id, epr);
*val = get_reg_val(id, epr);
break;
}
#if defined(CONFIG_64BIT)
case KVM_REG_PPC_EPCR:
val = get_reg_val(reg->id, vcpu->arch.epcr);
*val = get_reg_val(id, vcpu->arch.epcr);
break;
#endif
case KVM_REG_PPC_TCR:
val = get_reg_val(reg->id, vcpu->arch.tcr);
*val = get_reg_val(id, vcpu->arch.tcr);
break;
case KVM_REG_PPC_TSR:
val = get_reg_val(reg->id, vcpu->arch.tsr);
*val = get_reg_val(id, vcpu->arch.tsr);
break;
case KVM_REG_PPC_DEBUG_INST:
val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
break;
case KVM_REG_PPC_VRSAVE:
val = get_reg_val(reg->id, vcpu->arch.vrsave);
*val = get_reg_val(id, vcpu->arch.vrsave);
break;
default:
r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
break;
}
if (r)
return r;
if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
r = -EFAULT;
return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r = 0;
union kvmppc_one_reg val;
int size;
size = one_reg_size(reg->id);
if (size > sizeof(val))
return -EINVAL;
if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
return -EFAULT;
switch (reg->id) {
switch (id) {
case KVM_REG_PPC_IAC1:
vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_IAC2:
vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_IAC4:
vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
break;
#endif
case KVM_REG_PPC_DAC1:
vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAC2:
vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_EPR: {
u32 new_epr = set_reg_val(reg->id, val);
u32 new_epr = set_reg_val(id, *val);
kvmppc_set_epr(vcpu, new_epr);
break;
}
#if defined(CONFIG_64BIT)
case KVM_REG_PPC_EPCR: {
u32 new_epcr = set_reg_val(reg->id, val);
u32 new_epcr = set_reg_val(id, *val);
kvmppc_set_epcr(vcpu, new_epcr);
break;
}
#endif
case KVM_REG_PPC_OR_TSR: {
u32 tsr_bits = set_reg_val(reg->id, val);
u32 tsr_bits = set_reg_val(id, *val);
kvmppc_set_tsr_bits(vcpu, tsr_bits);
break;
}
case KVM_REG_PPC_CLEAR_TSR: {
u32 tsr_bits = set_reg_val(reg->id, val);
u32 tsr_bits = set_reg_val(id, *val);
kvmppc_clr_tsr_bits(vcpu, tsr_bits);
break;
}
case KVM_REG_PPC_TSR: {
u32 tsr = set_reg_val(reg->id, val);
u32 tsr = set_reg_val(id, *val);
kvmppc_set_tsr(vcpu, tsr);
break;
}
case KVM_REG_PPC_TCR: {
u32 tcr = set_reg_val(reg->id, val);
u32 tcr = set_reg_val(id, *val);
kvmppc_set_tcr(vcpu, tcr);
break;
}
case KVM_REG_PPC_VRSAVE:
vcpu->arch.vrsave = set_reg_val(reg->id, val);
vcpu->arch.vrsave = set_reg_val(id, *val);
break;
default:
r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
break;
}
......@@ -1694,10 +1839,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
update_timer_ints(vcpu);
}
void kvmppc_decrementer_func(unsigned long data)
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
if (vcpu->arch.tcr & TCR_ARE) {
vcpu->arch.dec = vcpu->arch.decar;
kvmppc_emulate_dec(vcpu);
......@@ -1842,7 +1985,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int n, b = 0, w = 0;
if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
vcpu->arch.dbg_reg.dbcr0 = 0;
vcpu->guest_debug = 0;
kvm_guest_protect_msr(vcpu, MSR_DE, false);
return 0;
......@@ -1850,15 +1993,13 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_guest_protect_msr(vcpu, MSR_DE, true);
vcpu->guest_debug = dbg->control;
vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
/* Set DBCR0_EDM in guest visible DBCR0 register. */
vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
vcpu->arch.dbg_reg.dbcr0 = 0;
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
/* Code below handles only HW breakpoints */
dbg_reg = &(vcpu->arch.shadow_dbg_reg);
dbg_reg = &(vcpu->arch.dbg_reg);
#ifdef CONFIG_KVM_BOOKE_HV
/*
......
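With the shadow copy gone, KVM_GUESTDBG_SINGLESTEP programs DBCR0_IDM|DBCR0_IC straight into vcpu->arch.dbg_reg. From userspace, single-stepping one guest instruction then reduces to the usual debug ioctl plus a run (a sketch; run is assumed to be the mmap'ed kvm_run area of vcpu_fd):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Step the guest by one instruction and expect a debug exit. */
static int single_step_once(int vcpu_fd, struct kvm_run *run)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
		return -1;
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	return run->exit_reason == KVM_EXIT_DEBUG ? 0 : -1;
}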
......@@ -32,9 +32,15 @@
#define BOOKE_IRQPRIO_ALIGNMENT 2
#define BOOKE_IRQPRIO_PROGRAM 3
#define BOOKE_IRQPRIO_FP_UNAVAIL 4
#ifdef CONFIG_SPE_POSSIBLE
#define BOOKE_IRQPRIO_SPE_UNAVAIL 5
#define BOOKE_IRQPRIO_SPE_FP_DATA 6
#define BOOKE_IRQPRIO_SPE_FP_ROUND 7
#endif
#ifdef CONFIG_PPC_E500MC
#define BOOKE_IRQPRIO_ALTIVEC_UNAVAIL 5
#define BOOKE_IRQPRIO_ALTIVEC_ASSIST 6
#endif
#define BOOKE_IRQPRIO_SYSCALL 8
#define BOOKE_IRQPRIO_AP_UNAVAIL 9
#define BOOKE_IRQPRIO_DTLB_MISS 10
......@@ -116,40 +122,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);
/*
* Load up the guest vcpu FP state if it's needed.
* It also sets MSR_FP in the thread so that the host knows
* we're holding the FPU, letting the host save the guest vcpu
* FP state if other threads need the FPU.
* This simulates an FP unavailable fault.
*
* Must be called with preemption disabled.
*/
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
enable_kernel_fp();
load_fp_state(&vcpu->arch.fp);
current->thread.fp_save_area = &vcpu->arch.fp;
current->thread.regs->msr |= MSR_FP;
}
#endif
}
/*
* Save the guest vcpu FP state into the thread.
* Must be called with preemption disabled.
*/
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
giveup_fpu(current);
current->thread.fp_save_area = NULL;
#endif
}
static inline void kvmppc_clear_dbsr(void)
{
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
......
......@@ -25,6 +25,7 @@
#define OP_19_XOP_RFI 50
#define OP_19_XOP_RFCI 51
#define OP_19_XOP_RFDI 39
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_WRTEE 131
......@@ -37,6 +38,12 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}
static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.dsrr0;
kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
}
static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.csrr0;
......@@ -65,6 +72,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
*advance = 0;
break;
case OP_19_XOP_RFDI:
kvmppc_emul_rfdi(vcpu);
kvmppc_set_exit_type(vcpu, EMULATED_RFDI_EXITS);
*advance = 0;
break;
default:
emulated = EMULATE_FAIL;
break;
......@@ -118,6 +131,7 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
bool debug_inst = false;
switch (sprn) {
case SPRN_DEAR:
......@@ -132,14 +146,128 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_CSRR1:
vcpu->arch.csrr1 = spr_val;
break;
case SPRN_DSRR0:
vcpu->arch.dsrr0 = spr_val;
break;
case SPRN_DSRR1:
vcpu->arch.dsrr1 = spr_val;
break;
case SPRN_IAC1:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
vcpu->arch.dbg_reg.iac1 = spr_val;
break;
case SPRN_IAC2:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
vcpu->arch.dbg_reg.iac2 = spr_val;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case SPRN_IAC3:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
vcpu->arch.dbg_reg.iac3 = spr_val;
break;
case SPRN_IAC4:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
vcpu->arch.dbg_reg.iac4 = spr_val;
break;
#endif
case SPRN_DAC1:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
vcpu->arch.dbg_reg.dac1 = spr_val;
break;
case SPRN_DAC2:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
vcpu->arch.dbg_reg.dac2 = spr_val;
break;
case SPRN_DBCR0:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
spr_val &= (DBCR0_IDM | DBCR0_IC | DBCR0_BT | DBCR0_TIE |
DBCR0_IAC1 | DBCR0_IAC2 | DBCR0_IAC3 | DBCR0_IAC4 |
DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W);
vcpu->arch.dbg_reg.dbcr0 = spr_val;
break;
case SPRN_DBCR1:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
vcpu->arch.dbg_reg.dbcr1 = spr_val;
break;
case SPRN_DBCR2:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
debug_inst = true;
vcpu->arch.dbg_reg.dbcr2 = spr_val;
break;
case SPRN_DBSR:
/*
* If userspace is debugging the guest then the guest
* cannot access the debug registers.
*/
if (vcpu->guest_debug)
break;
vcpu->arch.dbsr &= ~spr_val;
if (!(vcpu->arch.dbsr & ~DBSR_IDE))
kvmppc_core_dequeue_debug(vcpu);
break;
case SPRN_TSR:
kvmppc_clr_tsr_bits(vcpu, spr_val);
......@@ -252,6 +380,10 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
emulated = EMULATE_FAIL;
}
if (debug_inst) {
current->thread.debug = vcpu->arch.dbg_reg;
switch_booke_debug_regs(&vcpu->arch.dbg_reg);
}
return emulated;
}
......@@ -278,12 +410,43 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_CSRR1:
*spr_val = vcpu->arch.csrr1;
break;
case SPRN_DSRR0:
*spr_val = vcpu->arch.dsrr0;
break;
case SPRN_DSRR1:
*spr_val = vcpu->arch.dsrr1;
break;
case SPRN_IAC1:
*spr_val = vcpu->arch.dbg_reg.iac1;
break;
case SPRN_IAC2:
*spr_val = vcpu->arch.dbg_reg.iac2;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case SPRN_IAC3:
*spr_val = vcpu->arch.dbg_reg.iac3;
break;
case SPRN_IAC4:
*spr_val = vcpu->arch.dbg_reg.iac4;
break;
#endif
case SPRN_DAC1:
*spr_val = vcpu->arch.dbg_reg.dac1;
break;
case SPRN_DAC2:
*spr_val = vcpu->arch.dbg_reg.dac2;
break;
case SPRN_DBCR0:
*spr_val = vcpu->arch.dbg_reg.dbcr0;
if (vcpu->guest_debug)
*spr_val = *spr_val | DBCR0_EDM;
break;
case SPRN_DBCR1:
*spr_val = vcpu->arch.dbg_reg.dbcr1;
break;
case SPRN_DBCR2:
*spr_val = vcpu->arch.dbg_reg.dbcr2;
break;
case SPRN_DBSR:
*spr_val = vcpu->arch.dbsr;
break;
......
......@@ -238,7 +238,7 @@ kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1,NEED_ESR
SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
......@@ -256,11 +256,9 @@ kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, EX_PARAMS(GEN), \
kvm_handler BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, EX_PARAMS(GEN), \
kvm_handler BOOKE_INTERRUPT_ALTIVEC_ASSIST, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
......@@ -350,7 +348,7 @@ kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
......@@ -361,9 +359,6 @@ kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
......
......@@ -22,6 +22,7 @@
#include <linux/kvm_host.h>
#include <asm/mmu-book3e.h>
#include <asm/tlb.h>
#include <asm/cputhreads.h>
enum vcpu_ftr {
VCPU_FTR_MMU_V2
......@@ -289,6 +290,25 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS)
/*
* These functions should be called with preemption disabled
* and the returned values are valid only in that context.
*/
static inline int get_thread_specific_lpid(int vm_lpid)
{
int vcpu_lpid = vm_lpid;
if (threads_per_core == 2)
vcpu_lpid |= smp_processor_id() & 1;
return vcpu_lpid;
}
static inline int get_lpid(struct kvm_vcpu *vcpu)
{
return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
}
#else
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
struct kvm_book3e_206_tlb_entry *gtlbe);
......
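The pairing arithmetic is worth spelling out: each VM gets an even base lpid, and the low bit of the hardware thread id selects between the two. A standalone illustration of the mapping (hypothetical lpid values, assuming threads_per_core == 2):

#include <assert.h>

/* Mirror of get_thread_specific_lpid() for a dual-threaded core. */
static int thread_lpid(int vm_lpid, int thread)
{
	return vm_lpid | (thread & 1);
}

int main(void)
{
	assert(thread_lpid(2, 0) == 2);  /* vm1 on thread 0 */
	assert(thread_lpid(2, 1) == 3);  /* vm1 on thread 1 */
	assert(thread_lpid(4, 1) == 5);  /* vm2 on thread 1 */
	return 0;
}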
......@@ -259,6 +259,7 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
break;
/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
case SPRN_IVOR32:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
break;
......@@ -268,6 +269,15 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
case SPRN_IVOR34:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
break;
#endif
#ifdef CONFIG_ALTIVEC
case SPRN_IVOR32:
vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
break;
case SPRN_IVOR33:
vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
break;
#endif
case SPRN_IVOR35:
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
break;
......@@ -381,6 +391,7 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
break;
/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
case SPRN_IVOR32:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
break;
......@@ -390,6 +401,15 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
case SPRN_IVOR34:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
break;
#endif
#ifdef CONFIG_ALTIVEC
case SPRN_IVOR32:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
break;
case SPRN_IVOR33:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
break;
#endif
case SPRN_IVOR35:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
break;
......
......@@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
* writing shadow tlb entry to host TLB
*/
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
uint32_t mas0)
uint32_t mas0,
uint32_t lpid)
{
unsigned long flags;
......@@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
mtspr(SPRN_MAS8, stlbe->mas8);
mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
#endif
asm volatile("isync; tlbwe" : : : "memory");
......@@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
if (tlbsel == 0) {
mas0 = get_host_mas0(stlbe->mas2);
__write_host_tlbe(stlbe, mas0);
__write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
} else {
__write_host_tlbe(stlbe,
MAS0_TLBSEL(1) |
MAS0_ESEL(to_htlb1_esel(sesel)));
MAS0_ESEL(to_htlb1_esel(sesel)),
vcpu_e500->vcpu.kvm->arch.lpid);
}
}
......@@ -176,7 +178,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
magic.mas8 = 0;
__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
preempt_enable();
}
#endif
......@@ -317,10 +319,6 @@ static void kvmppc_e500_setup_stlbe(
stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
#ifdef CONFIG_KVM_BOOKE_HV
stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
......@@ -633,7 +631,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
local_irq_save(flags);
mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid);
mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
asm volatile("tlbsx 0, %[geaddr]\n" : :
[geaddr] "r" (geaddr));
mtspr(SPRN_MAS5, 0);
......
......@@ -48,10 +48,11 @@ void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
return;
}
tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
preempt_disable();
tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
mb();
ppc_msgsnd(dbell_type, 0, tag);
preempt_enable();
}
/* gtlbe must not be mapped by more than one host tlb entry */
......@@ -60,12 +61,11 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
{
unsigned int tid, ts;
gva_t eaddr;
u32 val, lpid;
u32 val;
unsigned long flags;
ts = get_tlb_ts(gtlbe);
tid = get_tlb_tid(gtlbe);
lpid = vcpu_e500->vcpu.kvm->arch.lpid;
/* We search the host TLB to invalidate its shadow TLB entry */
val = (tid << 16) | ts;
......@@ -74,7 +74,7 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
local_irq_save(flags);
mtspr(SPRN_MAS6, val);
mtspr(SPRN_MAS5, MAS5_SGS | lpid);
mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
val = mfspr(SPRN_MAS1);
......@@ -95,7 +95,7 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
unsigned long flags;
local_irq_save(flags);
mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
asm volatile("tlbilxlpid");
mtspr(SPRN_MAS5, 0);
local_irq_restore(flags);
......@@ -110,6 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
/* We use two lpids per VM */
static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
......@@ -118,10 +119,12 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
kvmppc_booke_vcpu_load(vcpu, cpu);
mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
mtspr(SPRN_LPID, get_lpid(vcpu));
mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
mtspr(SPRN_GPIR, vcpu->vcpu_id);
mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
vcpu->arch.epsc = vcpu->arch.eplc;
mtspr(SPRN_EPLC, vcpu->arch.eplc);
mtspr(SPRN_EPSC, vcpu->arch.epsc);
......@@ -141,12 +144,10 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
__get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
__get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
kvmppc_e500_tlbil_all(vcpu_e500);
__get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
__get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
}
kvmppc_load_guest_fp(vcpu);
}
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
......@@ -179,6 +180,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
r = 0;
#ifdef CONFIG_ALTIVEC
/*
* Since guests have the privilege to enable AltiVec, we need AltiVec
* support in the host to save/restore their context.
* Don't use CPU_FTR_ALTIVEC to identify cores with an AltiVec unit,
* because it's cleared in the absence of CONFIG_ALTIVEC!
*/
else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
r = 0;
#endif
else
r = -ENOTSUPP;
......@@ -194,9 +205,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
#ifdef CONFIG_64BIT
vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
vcpu->arch.epsc = vcpu->arch.eplc;
vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;
vcpu->arch.pvr = mfspr(SPRN_PVR);
vcpu_e500->svr = mfspr(SPRN_SVR);
......@@ -356,13 +365,26 @@ static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
if (lpid < 0)
return lpid;
/*
* Use two lpids per VM on cores with two threads like e6500. Use
* even numbers to speed up vcpu lpid computation with consecutive lpids
* per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
*/
if (threads_per_core == 2)
lpid <<= 1;
kvm->arch.lpid = lpid;
return 0;
}
static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
kvmppc_free_lpid(kvm->arch.lpid);
int lpid = kvm->arch.lpid;
if (threads_per_core == 2)
lpid >>= 1;
kvmppc_free_lpid(lpid);
}
static struct kvmppc_ops kvm_ops_e500mc = {
......@@ -390,7 +412,13 @@ static int __init kvmppc_e500mc_init(void)
if (r)
goto err_out;
kvmppc_init_lpid(64);
/*
* Use two lpids per VM on dual-threaded processors like e6500
* to work around the lack of a TLB write-conditional instruction.
* Expose half the number of available hardware lpids to the lpid
* allocator.
*/
kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
kvmppc_claim_lpid(0); /* host */
r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
......
......@@ -219,7 +219,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
emulated = kvmppc_get_last_inst(vcpu, false, &inst);
emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
if (emulated != EMULATE_DONE)
return emulated;
......@@ -274,6 +274,21 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
break;
case 0:
/*
* Instructions with primary opcode 0. According to the
* Power ISA, these are illegal instructions.
*/
if (inst == KVMPPC_INST_SW_BREAKPOINT) {
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.address = kvmppc_get_pc(vcpu);
emulated = EMULATE_EXIT_USER;
advance = 0;
} else
emulated = EMULATE_FAIL;
break;
default:
emulated = EMULATE_FAIL;
}
......
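The primary opcode is the most significant six bits of the instruction word, so 0x00dddd00 indeed decodes to opcode 0 and falls into the case above. For illustration:

#include <assert.h>
#include <stdint.h>

#define KVMPPC_INST_SW_BREAKPOINT 0x00dddd00u

/* Power instructions keep the primary opcode in the top six bits. */
static unsigned int primary_opcode(uint32_t inst)
{
	return inst >> 26;
}

int main(void)
{
	assert(primary_opcode(KVMPPC_INST_SW_BREAKPOINT) == 0);
	return 0;
}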
......@@ -58,7 +58,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
emulated = kvmppc_get_last_inst(vcpu, false, &inst);
emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
if (emulated != EMULATE_DONE)
return emulated;
......
......@@ -294,7 +294,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 last_inst;
kvmppc_get_last_inst(vcpu, false, &last_inst);
kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
/* XXX Deliver Program interrupt to guest. */
pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
r = RESUME_HOST;
......@@ -638,7 +638,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
/* Make sure we're not using the vcpu anymore */
hrtimer_cancel(&vcpu->arch.dec_timer);
tasklet_kill(&vcpu->arch.tasklet);
kvmppc_remove_vcpu_debugfs(vcpu);
......@@ -664,16 +663,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvmppc_core_pending_dec(vcpu);
}
/*
* low level hrtimer wake routine. Because this runs in hardirq context
* we schedule a tasklet to do the real work.
*/
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
tasklet_schedule(&vcpu->arch.tasklet);
kvmppc_decrementer_func(vcpu);
return HRTIMER_NORESTART;
}
......@@ -683,7 +678,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
int ret;
hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
vcpu->arch.dec_expires = ~(u64)0;
......@@ -907,6 +901,103 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
int r = 0;
union kvmppc_one_reg val;
int size;
size = one_reg_size(reg->id);
if (size > sizeof(val))
return -EINVAL;
r = kvmppc_get_one_reg(vcpu, reg->id, &val);
if (r == -EINVAL) {
r = 0;
switch (reg->id) {
#ifdef CONFIG_ALTIVEC
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
break;
case KVM_REG_PPC_VRSAVE:
val = get_reg_val(reg->id, vcpu->arch.vrsave);
break;
#endif /* CONFIG_ALTIVEC */
default:
r = -EINVAL;
break;
}
}
if (r)
return r;
if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
r = -EFAULT;
return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
int r;
union kvmppc_one_reg val;
int size;
size = one_reg_size(reg->id);
if (size > sizeof(val))
return -EINVAL;
if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
return -EFAULT;
r = kvmppc_set_one_reg(vcpu, reg->id, &val);
if (r == -EINVAL) {
r = 0;
switch (reg->id) {
#ifdef CONFIG_ALTIVEC
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_VRSAVE:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
vcpu->arch.vrsave = set_reg_val(reg->id, val);
break;
#endif /* CONFIG_ALTIVEC */
default:
r = -EINVAL;
break;
}
}
return r;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r;
......
......@@ -303,9 +303,13 @@ config PPC_ICSWX_USE_SIGILL
If in doubt, say N here.
config SPE_POSSIBLE
def_bool y
depends on E200 || (E500 && !PPC_E500MC)
config SPE
bool "SPE Support"
depends on E200 || (E500 && !PPC_E500MC)
depends on SPE_POSSIBLE
default y
---help---
This option enables kernel support for the Signal Processing
......