提交 c60b4cf7 编写于 作者: A Alexander Graf 提交者: Avi Kivity

KVM: PPC: Add tracepoints for generic spte flushes

The different ways of flushing shadow PTEs have their own debug prints which use
stupid old printk.

Let's move them to tracepoints, making them more easily available, faster, and
possible to activate on demand.
Signed-off-by: Alexander Graf <agraf@suse.de>
上级 c22c3196
...@@ -31,14 +31,6 @@ ...@@ -31,14 +31,6 @@
#define PTE_SIZE 12 #define PTE_SIZE 12
/* #define DEBUG_MMU */
#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif
static struct kmem_cache *hpte_cache; static struct kmem_cache *hpte_cache;
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
...@@ -186,9 +178,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) ...@@ -186,9 +178,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{ {
dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n", trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
vcpu->arch.hpte_cache_count, guest_ea, ea_mask);
guest_ea &= ea_mask; guest_ea &= ea_mask;
switch (ea_mask) { switch (ea_mask) {
...@@ -251,8 +241,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) ...@@ -251,8 +241,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{ {
dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n", trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
guest_vp &= vp_mask; guest_vp &= vp_mask;
switch(vp_mask) { switch(vp_mask) {
...@@ -274,8 +263,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) ...@@ -274,8 +263,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
struct hpte_cache *pte; struct hpte_cache *pte;
int i; int i;
dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n", trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);
vcpu->arch.hpte_cache_count, pa_start, pa_end);
rcu_read_lock(); rcu_read_lock();
......
...@@ -239,6 +239,29 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate, ...@@ -239,6 +239,29 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
__entry->vpage, __entry->raddr, __entry->flags) __entry->vpage, __entry->raddr, __entry->flags)
); );
TRACE_EVENT(kvm_book3s_mmu_flush,
TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
unsigned long long p2),
TP_ARGS(type, vcpu, p1, p2),
TP_STRUCT__entry(
__field( int, count )
__field( unsigned long long, p1 )
__field( unsigned long long, p2 )
__field( const char *, type )
),
TP_fast_assign(
__entry->count = vcpu->arch.hpte_cache_count;
__entry->p1 = p1;
__entry->p2 = p2;
__entry->type = type;
),
TP_printk("Flush %d %sPTEs: %llx - %llx",
__entry->count, __entry->type, __entry->p1, __entry->p2)
);
#endif /* CONFIG_PPC_BOOK3S */ #endif /* CONFIG_PPC_BOOK3S */
#endif /* _TRACE_KVM_H */ #endif /* _TRACE_KVM_H */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册