提交 db93f574 编写于 作者: H Hollis Blanchard 提交者: Avi Kivity

KVM: ppc: create struct kvm_vcpu_44x and introduce container_of() accessor

This patch doesn't yet move all 44x-specific data into the new structure, but
is the first step down that path. In the future we may also want to create a
struct kvm_vcpu_booke.

Based on patch from Liu Yu <yu.liu@freescale.com>.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
上级 5cbb5106
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#ifndef __ASM_44X_H__
#define __ASM_44X_H__
#include <linux/kvm_host.h>
/* XXX Can't include mmu-44x.h because it redefines struct mm_context. */
#define PPC44x_TLB_SIZE 64
/*
 * 44x-specific vcpu state, embedding the generic struct kvm_vcpu.
 * NOTE(review): field offsets of this struct appear to be exported to
 * assembly via asm-offsets (VCPU_TO_44X, VCPU44x_SHADOW_TLB,
 * VCPU44x_SHADOW_MOD) and used from the interrupt/exit path, so do not
 * reorder fields without updating that code — confirm against asm-offsets.c.
 */
struct kvmppc_vcpu_44x {
/* Unmodified copy of the guest's TLB. */
struct kvmppc_44x_tlbe guest_tlb[PPC44x_TLB_SIZE];
/* TLB that's actually used when the guest is running. */
struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
/* Pages which are referenced in the shadow TLB. */
struct page *shadow_pages[PPC44x_TLB_SIZE];
/* Track which TLB entries we've modified in the current exit. */
u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
/* Generic vcpu; to_44x() recovers this container from a pointer to it. */
struct kvm_vcpu vcpu;
};
/* Recover the enclosing 44x-specific container from a generic vcpu pointer. */
static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x;

	vcpu_44x = container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
	return vcpu_44x;
}
#endif /* __ASM_44X_H__ */
...@@ -74,20 +74,7 @@ struct kvmppc_44x_tlbe { ...@@ -74,20 +74,7 @@ struct kvmppc_44x_tlbe {
struct kvm_arch { struct kvm_arch {
}; };
/* XXX Can't include mmu-44x.h because it redefines struct mm_context. */
#define PPC44x_TLB_SIZE 64
struct kvm_vcpu_arch { struct kvm_vcpu_arch {
/* Unmodified copy of the guest's TLB. */
struct kvmppc_44x_tlbe guest_tlb[PPC44x_TLB_SIZE];
/* TLB that's actually used when the guest is running. */
struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
/* Pages which are referenced in the shadow TLB. */
struct page *shadow_pages[PPC44x_TLB_SIZE];
/* Track which TLB entries we've modified in the current exit. */
u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
u32 host_stack; u32 host_stack;
u32 host_pid; u32 host_pid;
u32 host_dbcr0; u32 host_dbcr0;
......
...@@ -62,6 +62,9 @@ extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); ...@@ -62,6 +62,9 @@ extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
/* Core-specific hooks */ /* Core-specific hooks */
extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu); extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void); extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
...@@ -85,6 +88,9 @@ extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -85,6 +88,9 @@ extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
#endif /* __POWERPC_KVM_PPC_H__ */ #endif /* __POWERPC_KVM_PPC_H__ */
...@@ -23,9 +23,6 @@ ...@@ -23,9 +23,6 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/hrtimer.h> #include <linux/hrtimer.h>
#ifdef CONFIG_KVM
#include <linux/kvm_host.h>
#endif
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#include <linux/time.h> #include <linux/time.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
...@@ -51,6 +48,9 @@ ...@@ -51,6 +48,9 @@
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/alpaca.h> #include <asm/iseries/alpaca.h>
#endif #endif
#ifdef CONFIG_KVM
#include <asm/kvm_44x.h>
#endif
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include "head_booke.h" #include "head_booke.h"
...@@ -359,10 +359,14 @@ int main(void) ...@@ -359,10 +359,14 @@ int main(void)
#ifdef CONFIG_KVM #ifdef CONFIG_KVM
DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe)); DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe));
DEFINE(VCPU_TO_44X, offsetof(struct kvmppc_vcpu_44x, vcpu));
DEFINE(VCPU44x_SHADOW_TLB,
offsetof(struct kvmppc_vcpu_44x, shadow_tlb));
DEFINE(VCPU44x_SHADOW_MOD,
offsetof(struct kvmppc_vcpu_44x, shadow_tlb_mod));
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
......
...@@ -18,9 +18,13 @@ ...@@ -18,9 +18,13 @@
*/ */
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/err.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/kvm_44x.h>
#include <asm/kvm_ppc.h>
#include "44x_tlb.h" #include "44x_tlb.h"
...@@ -124,7 +128,8 @@ int kvmppc_core_check_processor_compat(void) ...@@ -124,7 +128,8 @@ int kvmppc_core_check_processor_compat(void)
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{ {
struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[0]; struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
tlbe->tid = 0; tlbe->tid = 0;
tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID; tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
...@@ -150,6 +155,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -150,6 +155,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr) struct kvm_translation *tr)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct kvmppc_44x_tlbe *gtlbe; struct kvmppc_44x_tlbe *gtlbe;
int index; int index;
gva_t eaddr; gva_t eaddr;
...@@ -166,7 +172,7 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, ...@@ -166,7 +172,7 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
return 0; return 0;
} }
gtlbe = &vcpu->arch.guest_tlb[index]; gtlbe = &vcpu_44x->guest_tlb[index];
tr->physical_address = tlb_xlate(gtlbe, eaddr); tr->physical_address = tlb_xlate(gtlbe, eaddr);
/* XXX what does "writeable" and "usermode" even mean? */ /* XXX what does "writeable" and "usermode" even mean? */
...@@ -174,3 +180,55 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, ...@@ -174,3 +180,55 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
return 0; return 0;
} }
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_44x *vcpu_44x;
struct kvm_vcpu *vcpu;
int err;
vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu_44x) {
err = -ENOMEM;
goto out;
}
vcpu = &vcpu_44x->vcpu;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
goto free_vcpu;
return vcpu;
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
out:
return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}
static int kvmppc_44x_init(void)
{
int r;
r = kvmppc_booke_init();
if (r)
return r;
return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
}
static void kvmppc_44x_exit(void)
{
kvmppc_booke_exit();
}
module_init(kvmppc_44x_init);
module_exit(kvmppc_44x_exit);
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include <asm/mmu-44x.h> #include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h> #include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "44x_tlb.h" #include "44x_tlb.h"
...@@ -43,7 +44,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) ...@@ -43,7 +44,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
"nr", "tid", "word0", "word1", "word2"); "nr", "tid", "word0", "word1", "word2");
for (i = 0; i < PPC44x_TLB_SIZE; i++) { for (i = 0; i < PPC44x_TLB_SIZE; i++) {
tlbe = &vcpu->arch.guest_tlb[i]; tlbe = &vcpu_44x->guest_tlb[i];
if (tlbe->word0 & PPC44x_TLB_VALID) if (tlbe->word0 & PPC44x_TLB_VALID)
printk(" G%2d | %02X | %08X | %08X | %08X |\n", printk(" G%2d | %02X | %08X | %08X | %08X |\n",
i, tlbe->tid, tlbe->word0, tlbe->word1, i, tlbe->tid, tlbe->word0, tlbe->word1,
...@@ -51,7 +52,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) ...@@ -51,7 +52,7 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
} }
for (i = 0; i < PPC44x_TLB_SIZE; i++) { for (i = 0; i < PPC44x_TLB_SIZE; i++) {
tlbe = &vcpu->arch.shadow_tlb[i]; tlbe = &vcpu_44x->shadow_tlb[i];
if (tlbe->word0 & PPC44x_TLB_VALID) if (tlbe->word0 & PPC44x_TLB_VALID)
printk(" S%2d | %02X | %08X | %08X | %08X |\n", printk(" S%2d | %02X | %08X | %08X | %08X |\n",
i, tlbe->tid, tlbe->word0, tlbe->word1, i, tlbe->tid, tlbe->word0, tlbe->word1,
...@@ -82,11 +83,12 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) ...@@ -82,11 +83,12 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
unsigned int as) unsigned int as)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i; int i;
/* XXX Replace loop with fancy data structures. */ /* XXX Replace loop with fancy data structures. */
for (i = 0; i < PPC44x_TLB_SIZE; i++) { for (i = 0; i < PPC44x_TLB_SIZE; i++) {
struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[i]; struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
unsigned int tid; unsigned int tid;
if (eaddr < get_tlb_eaddr(tlbe)) if (eaddr < get_tlb_eaddr(tlbe))
...@@ -114,25 +116,27 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, ...@@ -114,25 +116,27 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
gva_t eaddr) gva_t eaddr)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
unsigned int as = !!(vcpu->arch.msr & MSR_IS); unsigned int as = !!(vcpu->arch.msr & MSR_IS);
unsigned int index; unsigned int index;
index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
if (index == -1) if (index == -1)
return NULL; return NULL;
return &vcpu->arch.guest_tlb[index]; return &vcpu_44x->guest_tlb[index];
} }
struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
gva_t eaddr) gva_t eaddr)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
unsigned int as = !!(vcpu->arch.msr & MSR_DS); unsigned int as = !!(vcpu->arch.msr & MSR_DS);
unsigned int index; unsigned int index;
index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
if (index == -1) if (index == -1)
return NULL; return NULL;
return &vcpu->arch.guest_tlb[index]; return &vcpu_44x->guest_tlb[index];
} }
static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe) static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
...@@ -143,8 +147,9 @@ static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe) ...@@ -143,8 +147,9 @@ static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
unsigned int index) unsigned int index)
{ {
struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[index]; struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct page *page = vcpu->arch.shadow_pages[index]; struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
struct page *page = vcpu_44x->shadow_pages[index];
if (get_tlb_v(stlbe)) { if (get_tlb_v(stlbe)) {
if (kvmppc_44x_tlbe_is_writable(stlbe)) if (kvmppc_44x_tlbe_is_writable(stlbe))
...@@ -164,7 +169,9 @@ void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) ...@@ -164,7 +169,9 @@ void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i) void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
{ {
vcpu->arch.shadow_tlb_mod[i] = 1; struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
vcpu_44x->shadow_tlb_mod[i] = 1;
} }
/* Caller must ensure that the specified guest TLB entry is safe to insert into /* Caller must ensure that the specified guest TLB entry is safe to insert into
...@@ -172,6 +179,7 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i) ...@@ -172,6 +179,7 @@ void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
u32 flags) u32 flags)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
struct page *new_page; struct page *new_page;
struct kvmppc_44x_tlbe *stlbe; struct kvmppc_44x_tlbe *stlbe;
hpa_t hpaddr; hpa_t hpaddr;
...@@ -182,7 +190,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, ...@@ -182,7 +190,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
victim = kvmppc_tlb_44x_pos++; victim = kvmppc_tlb_44x_pos++;
if (kvmppc_tlb_44x_pos > tlb_44x_hwater) if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
kvmppc_tlb_44x_pos = 0; kvmppc_tlb_44x_pos = 0;
stlbe = &vcpu->arch.shadow_tlb[victim]; stlbe = &vcpu_44x->shadow_tlb[victim];
/* Get reference to new page. */ /* Get reference to new page. */
new_page = gfn_to_page(vcpu->kvm, gfn); new_page = gfn_to_page(vcpu->kvm, gfn);
...@@ -196,7 +204,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, ...@@ -196,7 +204,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
/* Drop reference to old page. */ /* Drop reference to old page. */
kvmppc_44x_shadow_release(vcpu, victim); kvmppc_44x_shadow_release(vcpu, victim);
vcpu->arch.shadow_pages[victim] = new_page; vcpu_44x->shadow_pages[victim] = new_page;
/* XXX Make sure (va, size) doesn't overlap any other /* XXX Make sure (va, size) doesn't overlap any other
* entries. 440x6 user manual says the result would be * entries. 440x6 user manual says the result would be
...@@ -224,12 +232,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, ...@@ -224,12 +232,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
gva_t eend, u32 asid) gva_t eend, u32 asid)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
unsigned int pid = !(asid & 0xff); unsigned int pid = !(asid & 0xff);
int i; int i;
/* XXX Replace loop with fancy data structures. */ /* XXX Replace loop with fancy data structures. */
for (i = 0; i <= tlb_44x_hwater; i++) { for (i = 0; i <= tlb_44x_hwater; i++) {
struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
unsigned int tid; unsigned int tid;
if (!get_tlb_v(stlbe)) if (!get_tlb_v(stlbe))
...@@ -259,12 +268,13 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, ...@@ -259,12 +268,13 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
* switching address spaces. */ * switching address spaces. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i; int i;
if (vcpu->arch.swap_pid) { if (vcpu->arch.swap_pid) {
/* XXX Replace loop with fancy data structures. */ /* XXX Replace loop with fancy data structures. */
for (i = 0; i <= tlb_44x_hwater; i++) { for (i = 0; i <= tlb_44x_hwater; i++) {
struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
/* Future optimization: clear only userspace mappings. */ /* Future optimization: clear only userspace mappings. */
kvmppc_44x_shadow_release(vcpu, i); kvmppc_44x_shadow_release(vcpu, i);
...@@ -303,6 +313,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, ...@@ -303,6 +313,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{ {
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
u64 eaddr; u64 eaddr;
u64 raddr; u64 raddr;
u64 asid; u64 asid;
...@@ -317,7 +328,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) ...@@ -317,7 +328,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
return EMULATE_FAIL; return EMULATE_FAIL;
} }
tlbe = &vcpu->arch.guest_tlb[index]; tlbe = &vcpu_44x->guest_tlb[index];
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe->word0 & PPC44x_TLB_VALID) { if (tlbe->word0 & PPC44x_TLB_VALID) {
......
...@@ -573,7 +573,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, ...@@ -573,7 +573,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return kvmppc_core_vcpu_translate(vcpu, tr); return kvmppc_core_vcpu_translate(vcpu, tr);
} }
static int kvmppc_booke_init(void) int kvmppc_booke_init(void)
{ {
unsigned long ivor[16]; unsigned long ivor[16];
unsigned long max_ivor = 0; unsigned long max_ivor = 0;
...@@ -618,14 +618,11 @@ static int kvmppc_booke_init(void) ...@@ -618,14 +618,11 @@ static int kvmppc_booke_init(void)
flush_icache_range(kvmppc_booke_handlers, flush_icache_range(kvmppc_booke_handlers,
kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE); return 0;
} }
static void __exit kvmppc_booke_exit(void) void __exit kvmppc_booke_exit(void)
{ {
free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
kvm_exit(); kvm_exit();
} }
module_init(kvmppc_booke_init)
module_exit(kvmppc_booke_exit)
...@@ -349,8 +349,8 @@ lightweight_exit: ...@@ -349,8 +349,8 @@ lightweight_exit:
lis r5, tlb_44x_hwater@ha lis r5, tlb_44x_hwater@ha
lwz r5, tlb_44x_hwater@l(r5) lwz r5, tlb_44x_hwater@l(r5)
mtctr r5 mtctr r5
addi r9, r4, VCPU_SHADOW_TLB addi r9, r4, -VCPU_TO_44X + VCPU44x_SHADOW_TLB
addi r5, r4, VCPU_SHADOW_MOD addi r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD
li r3, 0 li r3, 0
1: 1:
lbzx r7, r3, r5 lbzx r7, r3, r5
...@@ -377,7 +377,7 @@ lightweight_exit: ...@@ -377,7 +377,7 @@ lightweight_exit:
/* Clear bitmap of modified TLB entries */ /* Clear bitmap of modified TLB entries */
li r5, PPC44x_TLB_SIZE>>2 li r5, PPC44x_TLB_SIZE>>2
mtctr r5 mtctr r5
addi r5, r4, VCPU_SHADOW_MOD - 4 addi r5, r4, -VCPU_TO_44X + VCPU44x_SHADOW_MOD - 4
li r6, 0 li r6, 0
1: 1:
stwu r6, 4(r5) stwu r6, 4(r5)
......
...@@ -171,31 +171,12 @@ void kvm_arch_flush_shadow(struct kvm *kvm) ...@@ -171,31 +171,12 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{ {
struct kvm_vcpu *vcpu; return kvmppc_core_vcpu_create(kvm, id);
int err;
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
err = -ENOMEM;
goto out;
}
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
goto free_vcpu;
return vcpu;
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
return ERR_PTR(err);
} }
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{ {
kvm_vcpu_uninit(vcpu); kvmppc_core_vcpu_free(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
} }
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册