Commit c4befc58 authored by Paul Mackerras, committed by Avi Kivity

KVM: PPC: Move fields between struct kvm_vcpu_arch and kvmppc_vcpu_book3s

This moves the slb field, which represents the state of the emulated
SLB, from the kvmppc_vcpu_book3s struct to the kvm_vcpu_arch, and the
hpte_hash_[v]pte[_long] fields from kvm_vcpu_arch to kvmppc_vcpu_book3s.
This is in accord with the principle that the kvm_vcpu_arch struct
represents the state of the emulated CPU, and the kvmppc_vcpu_book3s
struct holds the auxiliary data structures used in the emulation.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Parent 149dbdb1
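To make the split concrete, here is a minimal sketch (not part of the patch; the two function names are hypothetical, while the fields and the to_book3s() helper are the ones touched by this change) of how code reaches each kind of state after the move: architected CPU state such as the SLB is accessed through vcpu->arch, whereas the shadow-HPTE bookkeeping is accessed through the book3s-specific struct.

        /* Sketch only: assumes the usual KVM Book3S headers and types. */
        static u64 example_read_slb_esid(struct kvm_vcpu *vcpu, u64 slb_nr)
        {
                /* Emulated SLB state now lives in vcpu->arch. */
                if (slb_nr > vcpu->arch.slb_nr)
                        return 0;
                return vcpu->arch.slb[slb_nr].orige;
        }

        static void example_touch_hpte_cache(struct kvm_vcpu *vcpu)
        {
                /* Auxiliary HPTE-cache data now lives in kvmppc_vcpu_book3s. */
                struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

                spin_lock(&vcpu3s->mmu_lock);
                vcpu3s->hpte_cache_count++;
                spin_unlock(&vcpu3s->mmu_lock);
        }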
@@ -24,20 +24,6 @@
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

-struct kvmppc_slb {
-        u64 esid;
-        u64 vsid;
-        u64 orige;
-        u64 origv;
-        bool valid : 1;
-        bool Ks : 1;
-        bool Kp : 1;
-        bool nx : 1;
-        bool large : 1;      /* PTEs are 16MB */
-        bool tb : 1;         /* 1TB segment */
-        bool class : 1;
-};
-
struct kvmppc_bat {
        u64 raw;
        u32 bepi;
@@ -67,11 +53,22 @@ struct kvmppc_sid_map {
#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
#endif

+struct hpte_cache {
+        struct hlist_node list_pte;
+        struct hlist_node list_pte_long;
+        struct hlist_node list_vpte;
+        struct hlist_node list_vpte_long;
+        struct rcu_head rcu_head;
+        u64 host_va;
+        u64 pfn;
+        ulong slot;
+        struct kvmppc_pte pte;
+};
+
struct kvmppc_vcpu_book3s {
        struct kvm_vcpu vcpu;
        struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
        struct kvmppc_sid_map sid_map[SID_MAP_NUM];
-        struct kvmppc_slb slb[64];
        struct {
                u64 esid;
                u64 vsid;
@@ -81,7 +78,6 @@ struct kvmppc_vcpu_book3s {
        struct kvmppc_bat dbat[8];
        u64 hid[6];
        u64 gqr[8];
-        int slb_nr;
        u64 sdr1;
        u64 hior;
        u64 msr_mask;
@@ -94,6 +90,13 @@
#endif
        int context_id[SID_CONTEXTS];
        ulong prog_flags; /* flags to inject when giving a 700 trap */
+
+        struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+        struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+        struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+        struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+        int hpte_cache_count;
+        spinlock_t mmu_lock;
};

#define CONTEXT_HOST 0
......
@@ -163,16 +163,18 @@ struct kvmppc_mmu {
        bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};

-struct hpte_cache {
-        struct hlist_node list_pte;
-        struct hlist_node list_pte_long;
-        struct hlist_node list_vpte;
-        struct hlist_node list_vpte_long;
-        struct rcu_head rcu_head;
-        u64 host_va;
-        u64 pfn;
-        ulong slot;
-        struct kvmppc_pte pte;
+struct kvmppc_slb {
+        u64 esid;
+        u64 vsid;
+        u64 orige;
+        u64 origv;
+        bool valid : 1;
+        bool Ks : 1;
+        bool Kp : 1;
+        bool nx : 1;
+        bool large : 1;      /* PTEs are 16MB */
+        bool tb : 1;         /* 1TB segment */
+        bool class : 1;
};

struct kvm_vcpu_arch {
@@ -187,6 +189,9 @@ struct kvm_vcpu_arch {
        ulong highmem_handler;
        ulong rmcall;
        ulong host_paca_phys;
+        struct kvmppc_slb slb[64];
+        int slb_max;         /* # valid entries in slb[] */
+        int slb_nr;          /* total number of entries in SLB */
        struct kvmppc_mmu mmu;
#endif
@@ -305,15 +310,6 @@
        struct kvm_vcpu_arch_shared *shared;
        unsigned long magic_page_pa; /* phys addr to map the magic page to */
        unsigned long magic_page_ea; /* effect. addr to map the magic page to */
-
-#ifdef CONFIG_PPC_BOOK3S
-        struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
-        struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
-        struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
-        struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
-        int hpte_cache_count;
-        spinlock_t mmu_lock;
-#endif
};

#endif /* __POWERPC_KVM_HOST_H__ */
@@ -17,7 +17,6 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include "trace.h"

#include <asm/reg.h>
#include <asm/cputable.h>
@@ -34,6 +33,8 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>

+#include "trace.h"
+
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
@@ -1191,8 +1192,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
-                        sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
-                        sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
+                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
+                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
@@ -1340,7 +1341,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
-        vcpu_book3s->slb_nr = 64;
+        vcpu->arch.slb_nr = 64;

        /* remember where some real-mode handlers are */
        vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
......
@@ -41,36 +41,36 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
}

static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
-                        struct kvmppc_vcpu_book3s *vcpu_book3s,
+                        struct kvm_vcpu *vcpu,
                        gva_t eaddr)
{
        int i;
        u64 esid = GET_ESID(eaddr);
        u64 esid_1t = GET_ESID_1T(eaddr);

-        for (i = 0; i < vcpu_book3s->slb_nr; i++) {
+        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                u64 cmp_esid = esid;

-                if (!vcpu_book3s->slb[i].valid)
+                if (!vcpu->arch.slb[i].valid)
                        continue;

-                if (vcpu_book3s->slb[i].tb)
+                if (vcpu->arch.slb[i].tb)
                        cmp_esid = esid_1t;

-                if (vcpu_book3s->slb[i].esid == cmp_esid)
-                        return &vcpu_book3s->slb[i];
+                if (vcpu->arch.slb[i].esid == cmp_esid)
+                        return &vcpu->arch.slb[i];
        }

        dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
                eaddr, esid, esid_1t);
-        for (i = 0; i < vcpu_book3s->slb_nr; i++) {
-                if (vcpu_book3s->slb[i].vsid)
+        for (i = 0; i < vcpu->arch.slb_nr; i++) {
+                if (vcpu->arch.slb[i].vsid)
                        dprintk(" %d: %c%c%c %llx %llx\n", i,
-                                vcpu_book3s->slb[i].valid ? 'v' : ' ',
-                                vcpu_book3s->slb[i].large ? 'l' : ' ',
-                                vcpu_book3s->slb[i].tb ? 't' : ' ',
-                                vcpu_book3s->slb[i].esid,
-                                vcpu_book3s->slb[i].vsid);
+                                vcpu->arch.slb[i].valid ? 'v' : ' ',
+                                vcpu->arch.slb[i].large ? 'l' : ' ',
+                                vcpu->arch.slb[i].tb ? 't' : ' ',
+                                vcpu->arch.slb[i].esid,
+                                vcpu->arch.slb[i].vsid);
        }

        return NULL;
@@ -81,7 +81,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
{
        struct kvmppc_slb *slb;

-        slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
+        slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slb)
                return 0;
@@ -180,7 +180,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                return 0;
        }

-        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
+        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slbe)
                goto no_seg_found;
@@ -320,10 +320,10 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;

-        if (slb_nr > vcpu_book3s->slb_nr)
+        if (slb_nr > vcpu->arch.slb_nr)
                return;

-        slbe = &vcpu_book3s->slb[slb_nr];
+        slbe = &vcpu->arch.slb[slb_nr];

        slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
        slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
@@ -344,38 +344,35 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
-        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;

-        if (slb_nr > vcpu_book3s->slb_nr)
+        if (slb_nr > vcpu->arch.slb_nr)
                return 0;

-        slbe = &vcpu_book3s->slb[slb_nr];
+        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
-        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;

-        if (slb_nr > vcpu_book3s->slb_nr)
+        if (slb_nr > vcpu->arch.slb_nr)
                return 0;

-        slbe = &vcpu_book3s->slb[slb_nr];
+        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->origv;
}

static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
-        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_slb *slbe;

        dprintk("KVM MMU: slbie(0x%llx)\n", ea);

-        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);
+        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

        if (!slbe)
                return;
@@ -389,13 +386,12 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
-        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        int i;

        dprintk("KVM MMU: slbia()\n");

-        for (i = 1; i < vcpu_book3s->slb_nr; i++)
-                vcpu_book3s->slb[i].valid = false;
+        for (i = 1; i < vcpu->arch.slb_nr; i++)
+                vcpu->arch.slb[i].valid = false;

        if (vcpu->arch.shared->msr & MSR_IR) {
                kvmppc_mmu_flush_segments(vcpu);
@@ -464,7 +460,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
        ulong mp_ea = vcpu->arch.magic_page_ea;

        if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
-                slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+                slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
                if (slb)
                        gvsid = slb->vsid;
        }
......
@@ -21,7 +21,6 @@
#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
-#include "trace.h"

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -29,6 +28,8 @@
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

+#include "trace.h"
+
#define PTE_SIZE 12

static struct kmem_cache *hpte_cache;
@@ -58,30 +59,31 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        u64 index;
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

        trace_kvm_book3s_mmu_map(pte);

-        spin_lock(&vcpu->arch.mmu_lock);
+        spin_lock(&vcpu3s->mmu_lock);

        /* Add to ePTE list */
        index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
-        hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+        hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

        /* Add to ePTE_long list */
        index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
        hlist_add_head_rcu(&pte->list_pte_long,
-                           &vcpu->arch.hpte_hash_pte_long[index]);
+                           &vcpu3s->hpte_hash_pte_long[index]);

        /* Add to vPTE list */
        index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
-        hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+        hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

        /* Add to vPTE_long list */
        index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
        hlist_add_head_rcu(&pte->list_vpte_long,
-                           &vcpu->arch.hpte_hash_vpte_long[index]);
+                           &vcpu3s->hpte_hash_vpte_long[index]);

-        spin_unlock(&vcpu->arch.mmu_lock);
+        spin_unlock(&vcpu3s->mmu_lock);
}

static void free_pte_rcu(struct rcu_head *head)
@@ -92,16 +94,18 @@ static void free_pte_rcu(struct rcu_head *head)
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
        trace_kvm_book3s_mmu_invalidate(pte);

        /* Different for 32 and 64 bit */
        kvmppc_mmu_invalidate_pte(vcpu, pte);

-        spin_lock(&vcpu->arch.mmu_lock);
+        spin_lock(&vcpu3s->mmu_lock);

        /* pte already invalidated in between? */
        if (hlist_unhashed(&pte->list_pte)) {
-                spin_unlock(&vcpu->arch.mmu_lock);
+                spin_unlock(&vcpu3s->mmu_lock);
                return;
        }
@@ -115,14 +119,15 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
        else
                kvm_release_pfn_clean(pte->pfn);

-        spin_unlock(&vcpu->arch.mmu_lock);
+        spin_unlock(&vcpu3s->mmu_lock);

-        vcpu->arch.hpte_cache_count--;
+        vcpu3s->hpte_cache_count--;
        call_rcu(&pte->rcu_head, free_pte_rcu);
}

static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;
        struct hlist_node *node;
        int i;
@@ -130,7 +135,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
-                struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
                        invalidate_pte(vcpu, pte);
@@ -141,12 +146,13 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
-        list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+        list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

        rcu_read_lock();
@@ -160,12 +166,13 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
-        list = &vcpu->arch.hpte_hash_pte_long[
+        list = &vcpu3s->hpte_hash_pte_long[
                        kvmppc_mmu_hash_pte_long(guest_ea)];

        rcu_read_lock();
@@ -203,12 +210,13 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;
        u64 vp_mask = 0xfffffffffULL;

-        list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+        list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

        rcu_read_lock();
@@ -223,12 +231,13 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;
        u64 vp_mask = 0xffffff000ULL;

-        list = &vcpu->arch.hpte_hash_vpte_long[
+        list = &vcpu3s->hpte_hash_vpte_long[
                        kvmppc_mmu_hash_vpte_long(guest_vp)];

        rcu_read_lock();
@@ -261,6 +270,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_node *node;
        struct hpte_cache *pte;
        int i;
@@ -270,7 +280,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
-                struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
                        if ((pte->pte.raddr >= pa_start) &&
@@ -283,12 +293,13 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;

        pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
-        vcpu->arch.hpte_cache_count++;
+        vcpu3s->hpte_cache_count++;

-        if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+        if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
                kvmppc_mmu_pte_flush_all(vcpu);

        return pte;
@@ -309,17 +320,19 @@ static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
+        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
        /* init hpte lookup hashes */
-        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
-                                  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
-        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
-                                  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
-        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
-                                  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
-        kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
-                                  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
+        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
+                                  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
+        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
+                                  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
+        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
+                                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
+        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
+                                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));

-        spin_lock_init(&vcpu->arch.mmu_lock);
+        spin_lock_init(&vcpu3s->mmu_lock);

        return 0;
}
......
@@ -252,7 +252,7 @@ TRACE_EVENT(kvm_book3s_mmu_flush,
        ),

        TP_fast_assign(
-                __entry->count = vcpu->arch.hpte_cache_count;
+                __entry->count = to_book3s(vcpu)->hpte_cache_count;
                __entry->p1 = p1;
                __entry->p2 = p2;
                __entry->type = type;
......