Commit 58a96214 authored by Hollis Blanchard, committed by Avi Kivity

KVM: ppc: change kvmppc_mmu_map() parameters

Passing just the TLB index will ease an e500 implementation.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent 475e7cdd
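The refactor is easy to see in miniature: before this patch, every caller decoded the guest TLB entry into (asid, flags, max_bytes) and passed the pieces across the core-specific interface; now only the index crosses it, and the implementation decodes its own entry format. A self-contained sketch of the pattern, using illustrative stand-in types and values rather than kernel code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for one core's guest TLB entry layout. */
struct tlbe_44x {
	uint32_t tid;    /* translation ID (asid source) */
	uint32_t word2;  /* permission/attribute flags */
	uint32_t bytes;  /* mapping size */
};

static struct tlbe_44x guest_tlb[64];

/* After the change: only the index crosses the interface; the
 * core-specific code dereferences its own entry format. */
static void mmu_map(uint64_t gvaddr, uint64_t gpaddr, unsigned int gtlb_index)
{
	struct tlbe_44x *gtlbe = &guest_tlb[gtlb_index];
	uint32_t asid = gtlbe->tid;        /* decoded here, not by callers */
	uint32_t flags = gtlbe->word2;
	uint32_t max_bytes = gtlbe->bytes;

	printf("map va=0x%" PRIx64 " pa=0x%" PRIx64 " asid=%u flags=0x%x max=%u\n",
	       gvaddr, gpaddr, asid, flags, max_bytes);
}

int main(void)
{
	guest_tlb[3] = (struct tlbe_44x){ .tid = 1, .word2 = 0x3f, .bytes = 4096 };
	mmu_map(0x10000000, 0x02000000, 3);  /* caller passes only the index */
	return 0;
}

The 44x hunks below do exactly this: the new locals asid, flags, and max_bytes are derived from guest_tlb[gtlb_index] inside kvmppc_mmu_map(), and the call sites shrink to four arguments.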
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -55,7 +55,6 @@ extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 
 /* Core-specific hooks */
 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
-			   u64 asid, u32 flags, u32 max_bytes,
 			   unsigned int gtlb_idx);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
 extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -269,15 +269,19 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
  * Caller must ensure that the specified guest TLB entry is safe to insert into
  * the shadow TLB.
  */
-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
-		    u32 flags, u32 max_bytes, unsigned int gtlb_index)
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
+		    unsigned int gtlb_index)
 {
 	struct kvmppc_44x_tlbe stlbe;
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
 	struct kvmppc_44x_shadow_ref *ref;
 	struct page *new_page;
 	hpa_t hpaddr;
 	gfn_t gfn;
+	u32 asid = gtlbe->tid;
+	u32 flags = gtlbe->word2;
+	u32 max_bytes = get_tlb_bytes(gtlbe);
 	unsigned int victim;
 
 	/* Select TLB entry to clobber. Indirectly guard against races with the TLB
@@ -448,10 +452,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	}
 
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
-		u64 asid;
 		gva_t eaddr;
 		gpa_t gpaddr;
-		u32 flags;
 		u32 bytes;
 
 		eaddr = get_tlb_eaddr(tlbe);
@@ -462,10 +464,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		eaddr &= ~(bytes - 1);
 		gpaddr &= ~(bytes - 1);
 
-		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
-		flags = tlbe->word2 & 0xffff;
-
-		kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
+		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
 	}
 
 	KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -316,8 +316,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 * b) the guest used a large mapping which we're faking
 			 * Either way, we need to satisfy the fault without
 			 * invoking the guest. */
-			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
-				       gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
+			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
 			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
 			r = RESUME_GUEST;
 		} else {
@@ -364,8 +363,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 * b) the guest used a large mapping which we're faking
 			 * Either way, we need to satisfy the fault without
 			 * invoking the guest. */
-			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
-				       gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
+			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
 		} else {
 			/* Guest mapped and leaped at non-RAM! */
 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
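Why this eases an e500 port: e500 guest TLB entries are images of the MAS registers, a different layout from the 44x word0/word1/word2 format, so any attributes pre-decoded by common code would encode 44x assumptions. With only the index crossing the hook, each core decodes its own format behind the same prototype. A hypothetical sketch (the e500 struct, its field names, and the decode below are assumptions for illustration, not from this patch):

#include <stdint.h>
#include <stdio.h>

#ifdef CORE_E500
struct tlbe { uint32_t mas1, mas2, mas3; };   /* MAS-register image (illustrative) */
#else
struct tlbe { uint32_t tid, word2, bytes; };  /* 44x-style TLB words */
#endif

static struct tlbe guest_tlb[64];

/* Same prototype for every core; only the decode differs. */
static void mmu_map(uint64_t gvaddr, uint64_t gpaddr, unsigned int gtlb_index)
{
	struct tlbe *e = &guest_tlb[gtlb_index];
#ifdef CORE_E500
	uint32_t asid = e->mas1 & 0xffffu;   /* illustrative e500 field extraction */
#else
	uint32_t asid = e->tid;              /* 44x decode, as in this patch */
#endif
	printf("map va=%llx pa=%llx asid=%u\n",
	       (unsigned long long)gvaddr, (unsigned long long)gpaddr, asid);
}

int main(void)
{
	mmu_map(0x10000000, 0x02000000, 0);  /* zero-initialized entry */
	return 0;
}

Build with or without -DCORE_E500 to select a layout; the caller and the prototype never change, which is exactly the property the narrower kvmppc_mmu_map() signature buys.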