Commit a2d56020 authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S PR: Keep volatile reg values in vcpu rather than shadow_vcpu

Currently PR-style KVM keeps the volatile guest register values
(R0 - R13, CR, LR, CTR, XER, PC) in a shadow_vcpu struct rather than
the main kvm_vcpu struct.  For 64-bit, the shadow_vcpu exists in two
places, a kmalloc'd struct and in the PACA, and it gets copied back
and forth in kvmppc_core_vcpu_load/put(), because the real-mode code
can't rely on being able to access the kmalloc'd struct.

This changes the code to copy the volatile values into the shadow_vcpu
as one of the last things done before entering the guest.  Similarly
the values are copied back out of the shadow_vcpu to the kvm_vcpu
immediately after exiting the guest.  We arrange for interrupts to be
still disabled at this point so that we can't get preempted on 64-bit
and end up copying values from the wrong PACA.
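As a rough user-space model of that ordering (the struct layouts, the irq helpers and the enter_guest() stub below are invented stand-ins for illustration; the real transfer is done by kvmppc_copy_to_svcpu()/kvmppc_copy_from_svcpu() and the assembly in book3s_interrupts.S shown in the diff):

#include <string.h>

/* Invented stand-ins for the kernel structures involved. */
struct svcpu_model { unsigned long gpr[14], ctr, lr, pc; unsigned int cr, xer; };
struct vcpu_model  { unsigned long gpr[32], ctr, lr, pc; unsigned int cr, xer; };

static void model_irq_disable(void) { }                     /* hard-disable interrupts */
static void model_irq_enable(void)  { }                     /* re-enable interrupts */
static int  model_enter_guest(struct svcpu_model *s) { (void)s; return 0; }

/* Volatile regs (R0-R13, CR, XER, CTR, LR, PC) go into the shadow vcpu as one
 * of the last steps before entering the guest... */
static void model_copy_to_svcpu(struct svcpu_model *s, const struct vcpu_model *v)
{
	memcpy(s->gpr, v->gpr, sizeof(s->gpr));              /* first 14 GPRs only */
	s->cr = v->cr; s->xer = v->xer;
	s->ctr = v->ctr; s->lr = v->lr; s->pc = v->pc;
}

/* ...and are copied back out immediately after the guest exits. */
static void model_copy_from_svcpu(struct vcpu_model *v, const struct svcpu_model *s)
{
	memcpy(v->gpr, s->gpr, sizeof(s->gpr));
	v->cr = s->cr; v->xer = s->xer;
	v->ctr = s->ctr; v->lr = s->lr; v->pc = s->pc;
}

static int model_run_guest(struct vcpu_model *v, struct svcpu_model *s)
{
	model_irq_disable();             /* interrupts stay off across both copies, */
	model_copy_to_svcpu(s, v);       /* so the task can't be preempted onto a   */
	int r = model_enter_guest(s);    /* CPU whose PACA holds a different svcpu  */
	model_copy_from_svcpu(v, s);
	model_irq_enable();
	return r;
}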

This means that the accessor functions in kvm_book3s.h for these
registers are greatly simplified, and are the same between PR and HV KVM.
In places where accesses to shadow_vcpu fields are now replaced by
accesses to the kvm_vcpu, we can also remove the svcpu_get/put pairs.
Finally, on 64-bit, we don't need the kmalloc'd struct at all any more.
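For example, the CR accessors collapse to the single form below for both PR and HV (lifted from the kvm_book3s.h hunk in this diff), with no svcpu_get()/svcpu_put() around the access:

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr;
}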

With this, the time to read the PVR one million times in a loop went
from 567.7ms to 575.5ms (averages of 6 values), an increase of about
1.4% for this worst-case test for guest entries and exits.  The
standard deviation of the measurements is about 11ms, so the
difference is only marginally significant statistically.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Parent f2481771
@@ -200,140 +200,76 @@ extern void kvm_return_point(void);
#include <asm/kvm_book3s_64.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_PR
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
return to_book3s(vcpu)->hior;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
unsigned long pending_now, unsigned long old_pending)
{
if (pending_now)
vcpu->arch.shared->int_pending = 1;
else if (old_pending)
vcpu->arch.shared->int_pending = 0;
}
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
if ( num < 14 ) {
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->gpr[num] = val;
svcpu_put(svcpu);
to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
} else
vcpu->arch.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
if ( num < 14 ) {
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r = svcpu->gpr[num];
svcpu_put(svcpu);
return r;
} else
return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->cr = val;
svcpu_put(svcpu);
to_book3s(vcpu)->shadow_vcpu->cr = val;
vcpu->arch.cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 r;
r = svcpu->cr;
svcpu_put(svcpu);
return r;
return vcpu->arch.cr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->xer = val;
to_book3s(vcpu)->shadow_vcpu->xer = val;
svcpu_put(svcpu);
vcpu->arch.xer = val;
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 r;
r = svcpu->xer;
svcpu_put(svcpu);
return r;
return vcpu->arch.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->ctr = val;
svcpu_put(svcpu);
vcpu->arch.ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r;
r = svcpu->ctr;
svcpu_put(svcpu);
return r;
return vcpu->arch.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->lr = val;
svcpu_put(svcpu);
vcpu->arch.lr = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r;
r = svcpu->lr;
svcpu_put(svcpu);
return r;
return vcpu->arch.lr;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
svcpu->pc = val;
svcpu_put(svcpu);
vcpu->arch.pc = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r;
r = svcpu->pc;
svcpu_put(svcpu);
return r;
return vcpu->arch.pc;
}
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
ulong pc = kvmppc_get_pc(vcpu);
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 r;
/* Load the instruction manually if it failed to do so in the
* exit path */
if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
r = svcpu->last_inst;
svcpu_put(svcpu);
return r;
if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
return vcpu->arch.last_inst;
}
/*
@@ -344,26 +280,34 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
ulong pc = kvmppc_get_pc(vcpu) - 4;
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 r;
/* Load the instruction manually if it failed to do so in the
* exit path */
if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
r = svcpu->last_inst;
svcpu_put(svcpu);
return r;
if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
return vcpu->arch.last_inst;
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong r;
r = svcpu->fault_dar;
svcpu_put(svcpu);
return r;
return vcpu->arch.fault_dar;
}
#ifdef CONFIG_KVM_BOOK3S_PR
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
return to_book3s(vcpu)->hior;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
unsigned long pending_now, unsigned long old_pending)
{
if (pending_now)
vcpu->arch.shared->int_pending = 1;
else if (old_pending)
vcpu->arch.shared->int_pending = 0;
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
@@ -397,100 +341,6 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
{
}
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.cr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.xer = val;
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.ctr = val;
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.lr = val;
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.lr;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.pc = val;
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
return vcpu->arch.pc;
}
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
ulong pc = kvmppc_get_pc(vcpu);
/* Load the instruction manually if it failed to do so in the
* exit path */
if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
return vcpu->arch.last_inst;
}
/*
* Like kvmppc_get_last_inst(), but for fetching a sc instruction.
* Because the sc instruction sets SRR0 to point to the following
* instruction, we have to fetch from pc - 4.
*/
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
ulong pc = kvmppc_get_pc(vcpu) - 4;
/* Load the instruction manually if it failed to do so in the
* exit path */
if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
return vcpu->arch.last_inst;
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dar;
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
return false;
...
@@ -109,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu {
ulong gpr[14];
u32 cr;
u32 xer;
u32 fault_dsisr;
u32 last_inst;
ulong ctr;
ulong lr;
ulong pc;
ulong shadow_srr1;
ulong fault_dar;
u32 fault_dsisr;
u32 last_inst;
#ifdef CONFIG_PPC_BOOK3S_32
u32 sr[16]; /* Guest SRs */
...
@@ -463,6 +463,7 @@ struct kvm_vcpu_arch {
ulong dabr;
ulong cfar;
ulong ppr;
ulong shadow_srr1;
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
...
@@ -520,6 +520,7 @@ int main(void)
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
@@ -527,14 +528,13 @@ int main(void)
DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
offsetof(struct kvmppc_vcpu_book3s, vcpu));
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR
DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
...
@@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_st(vcpu, &addr, 32, zeros, true);
if ((r == -ENOENT) || (r == -EPERM)) {
struct kvmppc_book3s_shadow_vcpu *svcpu;
svcpu = svcpu_get(vcpu);
*advance = 0;
vcpu->arch.shared->dar = vaddr;
svcpu->fault_dar = vaddr;
vcpu->arch.fault_dar = vaddr;
dsisr = DSISR_ISSTORE;
if (r == -ENOENT)
@@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->dsisr = dsisr;
svcpu->fault_dsisr = dsisr;
vcpu->arch.fault_dsisr = dsisr;
svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_STORAGE);
...
@@ -26,8 +26,12 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define FUNC(name) GLUE(.,name)
#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name) name
#define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
#endif /* CONFIG_PPC_BOOK3S_XX */
#define VCPU_LOAD_NVGPRS(vcpu) \
@@ -87,8 +91,14 @@ kvm_start_entry:
VCPU_LOAD_NVGPRS(r4)
kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
GET_SHADOW_VCPU(r3)
bl FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)
#ifdef CONFIG_PPC_BOOK3S_64
/* Get the dcbz32 flag */
PPC_LL r3, VCPU_HFLAGS(r4)
rldicl r3, r3, 0, 63 /* r3 &= 1 */
stb r3, HSTATE_RESTORE_HID5(r13)
@@ -125,18 +135,31 @@ kvmppc_handler_highmem:
*
*/
/* R7 = vcpu */
PPC_LL r7, GPR4(r1)
/* Transfer reg values from shadow vcpu back to vcpu struct */
/* On 64-bit, interrupts are still off at this point */
PPC_LL r3, GPR4(r1) /* vcpu pointer */
GET_SHADOW_VCPU(r4)
bl FUNC(kvmppc_copy_from_svcpu)
nop
#ifdef CONFIG_PPC_BOOK3S_64
/* Re-enable interrupts */
ld r3, HSTATE_HOST_MSR(r13)
ori r3, r3, MSR_EE
MTMSR_EERI(r3)
/*
* Reload kernel SPRG3 value.
* No need to save guest value as usermode can't modify SPRG3.
*/
ld r3, PACA_SPRG3(r13)
mtspr SPRN_SPRG3, r3
#endif /* CONFIG_PPC_BOOK3S_64 */
/* R7 = vcpu */
PPC_LL r7, GPR4(r1)
PPC_STL r14, VCPU_GPR(R14)(r7)
PPC_STL r15, VCPU_GPR(R15)(r7)
PPC_STL r16, VCPU_GPR(R16)(r7)
...
@@ -61,8 +61,6 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
sizeof(get_paca()->shadow_vcpu));
svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
svcpu_put(svcpu);
#endif
@@ -77,8 +75,6 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
sizeof(get_paca()->shadow_vcpu));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
svcpu_put(svcpu);
#endif
@@ -87,6 +83,60 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
}
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
struct kvm_vcpu *vcpu)
{
svcpu->gpr[0] = vcpu->arch.gpr[0];
svcpu->gpr[1] = vcpu->arch.gpr[1];
svcpu->gpr[2] = vcpu->arch.gpr[2];
svcpu->gpr[3] = vcpu->arch.gpr[3];
svcpu->gpr[4] = vcpu->arch.gpr[4];
svcpu->gpr[5] = vcpu->arch.gpr[5];
svcpu->gpr[6] = vcpu->arch.gpr[6];
svcpu->gpr[7] = vcpu->arch.gpr[7];
svcpu->gpr[8] = vcpu->arch.gpr[8];
svcpu->gpr[9] = vcpu->arch.gpr[9];
svcpu->gpr[10] = vcpu->arch.gpr[10];
svcpu->gpr[11] = vcpu->arch.gpr[11];
svcpu->gpr[12] = vcpu->arch.gpr[12];
svcpu->gpr[13] = vcpu->arch.gpr[13];
svcpu->cr = vcpu->arch.cr;
svcpu->xer = vcpu->arch.xer;
svcpu->ctr = vcpu->arch.ctr;
svcpu->lr = vcpu->arch.lr;
svcpu->pc = vcpu->arch.pc;
}
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
struct kvmppc_book3s_shadow_vcpu *svcpu)
{
vcpu->arch.gpr[0] = svcpu->gpr[0];
vcpu->arch.gpr[1] = svcpu->gpr[1];
vcpu->arch.gpr[2] = svcpu->gpr[2];
vcpu->arch.gpr[3] = svcpu->gpr[3];
vcpu->arch.gpr[4] = svcpu->gpr[4];
vcpu->arch.gpr[5] = svcpu->gpr[5];
vcpu->arch.gpr[6] = svcpu->gpr[6];
vcpu->arch.gpr[7] = svcpu->gpr[7];
vcpu->arch.gpr[8] = svcpu->gpr[8];
vcpu->arch.gpr[9] = svcpu->gpr[9];
vcpu->arch.gpr[10] = svcpu->gpr[10];
vcpu->arch.gpr[11] = svcpu->gpr[11];
vcpu->arch.gpr[12] = svcpu->gpr[12];
vcpu->arch.gpr[13] = svcpu->gpr[13];
vcpu->arch.cr = svcpu->cr;
vcpu->arch.xer = svcpu->xer;
vcpu->arch.ctr = svcpu->ctr;
vcpu->arch.lr = svcpu->lr;
vcpu->arch.pc = svcpu->pc;
vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
vcpu->arch.fault_dar = svcpu->fault_dar;
vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
vcpu->arch.last_inst = svcpu->last_inst;
}
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
int r = 1; /* Indicate we want to get back into the guest */
@@ -388,22 +438,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (page_found == -ENOENT) {
/* Page not found in guest PTE entries */
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
vcpu->arch.shared->msr |=
(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) {
/* Storage protection */
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->msr |=
svcpu->shadow_srr1 & 0x00000000f8000000ULL;
vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EINVAL) {
/* Page not found in guest SLB */
@@ -645,21 +691,26 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (exit_nr) {
case BOOK3S_INTERRUPT_INST_STORAGE:
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong shadow_srr1 = svcpu->shadow_srr1;
ulong shadow_srr1 = vcpu->arch.shadow_srr1;
vcpu->stat.pf_instruc++;
#ifdef CONFIG_PPC_BOOK3S_32
/* We set segments as unused segments when invalidating them. So
* treat the respective fault as segment fault. */
if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
{
struct kvmppc_book3s_shadow_vcpu *svcpu;
u32 sr;
svcpu = svcpu_get(vcpu);
sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
svcpu_put(svcpu);
if (sr == SR_INVALID) {
kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
r = RESUME_GUEST;
svcpu_put(svcpu);
break;
}
}
#endif
svcpu_put(svcpu);
/* only care about PTEG not found errors, but leave NX alone */
if (shadow_srr1 & 0x40000000) {
@@ -684,21 +735,26 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_DATA_STORAGE:
{
ulong dar = kvmppc_get_fault_dar(vcpu);
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u32 fault_dsisr = svcpu->fault_dsisr;
u32 fault_dsisr = vcpu->arch.fault_dsisr;
vcpu->stat.pf_storage++;
#ifdef CONFIG_PPC_BOOK3S_32
/* We set segments as unused segments when invalidating them. So
* treat the respective fault as segment fault. */
if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
{
struct kvmppc_book3s_shadow_vcpu *svcpu;
u32 sr;
svcpu = svcpu_get(vcpu);
sr = svcpu->sr[dar >> SID_SHIFT];
svcpu_put(svcpu);
if (sr == SR_INVALID) {
kvmppc_mmu_map_segment(vcpu, dar);
r = RESUME_GUEST;
svcpu_put(svcpu);
break;
}
}
#endif
svcpu_put(svcpu);
/* The only case we need to handle is missing shadow PTEs */
if (fault_dsisr & DSISR_NOHPTE) {
@@ -745,13 +801,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
{
enum emulation_result er;
struct kvmppc_book3s_shadow_vcpu *svcpu;
ulong flags;
program_interrupt:
svcpu = svcpu_get(vcpu);
flags = svcpu->shadow_srr1 & 0x1f0000ull;
svcpu_put(svcpu);
flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
@@ -883,9 +936,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
default:
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
ulong shadow_srr1 = svcpu->shadow_srr1;
svcpu_put(svcpu);
ulong shadow_srr1 = vcpu->arch.shadow_srr1;
/* Ugh - bork here! What did we get? */
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
@@ -1060,11 +1111,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (!vcpu_book3s)
goto out;
#ifdef CONFIG_KVM_BOOK3S_32
vcpu_book3s->shadow_vcpu =
kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
if (!vcpu_book3s->shadow_vcpu)
goto free_vcpu;
#endif
vcpu = &vcpu_book3s->vcpu;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
@@ -1098,8 +1150,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
#endif
vfree(vcpu_book3s);
out:
return ERR_PTR(err);
...
@@ -179,11 +179,15 @@ _GLOBAL(kvmppc_entry_trampoline)
li r6, MSR_IR | MSR_DR
andc r6, r5, r6 /* Clear DR and IR in MSR value */
#ifdef CONFIG_PPC_BOOK3S_32
/*
* Set EE in HOST_MSR so that it's enabled when we get into our
* C exit handler function
* C exit handler function. On 64-bit we delay enabling
* interrupts until we have finished transferring stuff
* to or from the PACA.
*/
ori r5, r5, MSR_EE
#endif
mtsrr0 r7
mtsrr1 r6
RFI
...
@@ -101,17 +101,12 @@ TRACE_EVENT(kvm_exit,
),
TP_fast_assign(
#ifdef CONFIG_KVM_BOOK3S_PR
struct kvmppc_book3s_shadow_vcpu *svcpu;
#endif
__entry->exit_nr = exit_nr;
__entry->pc = kvmppc_get_pc(vcpu);
__entry->dar = kvmppc_get_fault_dar(vcpu);
__entry->msr = vcpu->arch.shared->msr;
#ifdef CONFIG_KVM_BOOK3S_PR
svcpu = svcpu_get(vcpu);
__entry->srr1 = svcpu->shadow_srr1;
svcpu_put(svcpu);
__entry->srr1 = vcpu->arch.shadow_srr1;
#endif
__entry->last_inst = vcpu->arch.last_inst;
),
...